-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathzerospy_data_centric.cpp
More file actions
2662 lines (2481 loc) · 115 KB
/
zerospy_data_centric.cpp
File metadata and controls
2662 lines (2481 loc) · 115 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include <unordered_map>
#include <map>
#include <vector>
#include <list>
#include <string>
#include <sys/stat.h>
#include <assert.h>
#include <algorithm>
#ifdef DEBUG_CHECK
#define IF_DEBUG(stat) stat
#else
#define IF_DEBUG(stat)
#endif
// #define ZEROSPY_DEBUG
// #define DEBUG_CHECK
#define _WERROR
#ifdef TIMING
#include <time.h>
#include <math.h>
uint64_t get_miliseconds() {
    // Current wall-clock time (CLOCK_REALTIME) in milliseconds, with the
    // nanosecond component rounded to the nearest millisecond.
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    return (uint64_t)now.tv_sec * 1000 + (uint64_t)round(now.tv_nsec / 1.0e6);
}
#endif
static void *gLock;
#include "dr_api.h"
#include "drmgr.h"
#include "drreg.h"
#include "drutil.h"
// enable data centric with addr info
#define DRCCTLIB_USE_ADDR
#include "drcctlib.h"
#include "utils.h"
#include "trace.h"
#include "bitvec.h"
#include "../cl_include/rapidjson/document.h"
#include "../cl_include/rapidjson/filewritestream.h"
#include "../cl_include/rapidjson/prettywriter.h"
#define WINDOW_ENABLE 1000000
#define WINDOW_DISABLE 100000000
// #define WINDOW_CLEAN 10
int window_enable;
int window_disable;
// Client Options
#include "droption.h"
// -enable_sampling: turn on bursty sampling (lower overhead, less accuracy).
static droption_t<bool> op_enable_sampling
(DROPTION_SCOPE_CLIENT, "enable_sampling", 0, 0, 64, "Enable Bursty Sampling",
"Enable bursty sampling for lower overhead with less profiling accuracy.");
// -help: print the option summary.
static droption_t<bool> op_help
(DROPTION_SCOPE_CLIENT, "help", 0, 0, 64, "Show this help",
"Show this help.");
// -window: total sampling window size in instructions (default WINDOW_DISABLE);
// only meaningful when -enable_sampling is given.
static droption_t<int> op_window
(DROPTION_SCOPE_CLIENT, "window", WINDOW_DISABLE, 0, INT32_MAX, "Window size configuration of sampling",
"Window size of sampling. Only available when sampling is enabled.");
// -window_enable: the enabled (profiled) prefix of each window (default
// WINDOW_ENABLE); only meaningful when -enable_sampling is given.
static droption_t<int> op_window_enable
(DROPTION_SCOPE_CLIENT, "window_enable", WINDOW_ENABLE, 0, INT32_MAX, "Window enabled size configuration of sampling",
"Window enabled size of sampling. Only available when sampling is enabled.")
using namespace std;
#define ZEROSPY_PRINTF(format, args...) \
DRCCTLIB_PRINTF_TEMPLATE("zerospy", format, ##args)
#define ZEROSPY_EXIT_PROCESS(format, args...) \
DRCCTLIB_CLIENT_EXIT_PROCESS_TEMPLATE("zerospy", format, \
##args)
#ifdef ARM_CCTLIB
# define OPND_CREATE_CCT_INT OPND_CREATE_INT
#else
# define OPND_CREATE_CCT_INT OPND_CREATE_INT32
#endif
#ifdef ARM_CCTLIB
# define OPND_CREATE_IMMEDIATE_INT OPND_CREATE_INT
#else
# ifdef CCTLIB_64
# define OPND_CREATE_IMMEDIATE_INT OPND_CREATE_INT64
# else
# define OPND_CREATE_IMMEDIATE_INT OPND_CREATE_INT32
# endif
#endif
// We only interest in memory loads
// Instrumentation filter: zerospy only analyzes load redundancy, so keep
// exactly the instructions that read memory.
bool
zerospy_filter_read_mem_access_instr(instr_t *instr)
{
    const bool is_load = instr_reads_memory(instr);
    return is_load;
}
#define ZEROSPY_FILTER_READ_MEM_ACCESS_INSTR zerospy_filter_read_mem_access_instr
static string g_folder_name;
static int tls_idx;
// One trace-buffer record: the effective address of a load plus a raw copy
// of the `size` bytes that were read, captured at access time so the value
// is still available when the buffer is drained.
template<int size>
struct cache_t {
void* addr;
int8_t val[size];
};
/* Max number of mem_ref a buffer can have. */
#define MAX_NUM_MEM_REFS 4096
/* The maximum size of buffer for holding mem_refs. */
#define MEM_BUF_SIZE(size) (sizeof(cache_t<size>) * MAX_NUM_MEM_REFS)
// Per-access-width trace buffers, filled by inlined instrumentation and
// drained by the trace-buffer callbacks:
//   iN  = integer loads of N bytes,
//   spN = N packed single-precision elements,
//   dpN = N packed double-precision elements.
static trace_buf_t *trace_buffer_i1;
static trace_buf_t *trace_buffer_i2;
static trace_buf_t *trace_buffer_i4;
static trace_buf_t *trace_buffer_i8;
static trace_buf_t *trace_buffer_i16;
static trace_buf_t *trace_buffer_i32;
static trace_buf_t *trace_buffer_sp1;
static trace_buf_t *trace_buffer_dp1;
static trace_buf_t *trace_buffer_sp4;
static trace_buf_t *trace_buffer_dp2;
static trace_buf_t *trace_buffer_sp8;
static trace_buf_t *trace_buffer_dp4;
// Redundancy log for integer loads of one data object at one object size.
struct RedLogs{
uint64_t red; // how many byte zero
bitvec_t redmap; // bitmap logging if a byte is redundant
bitvec_t accmap; // bitmap logging if a byte is accessed
};
// object size -> RedLogs, then packed object key -> per-size map.
typedef unordered_map<uint64_t, RedLogs> RedLogSizeMap;
typedef unordered_map<uint64_t, RedLogSizeMap> RedLogMap;
// Redundancy log for floating-point loads; typesz records the FP element
// width the object was accessed with (4 = float, 8 = double).
struct FPRedLogs{
uint64_t red; // how many byte zero
bitvec_t redmap; // bitmap logging if a byte is redundant
bitvec_t accmap; // bitmap logging if a byte is accessed
uint8_t typesz;
};
typedef unordered_map<uint64_t, FPRedLogs> FPRedLogSizeMap;
typedef unordered_map<uint64_t, FPRedLogSizeMap> FPRedLogMap;
#define MINSERT instrlist_meta_preinsert
// Integer-object key packing: high 32 bits = object type, low 32 bits =
// symbol-pool index (static) or cct handle (dynamic).
#define MAKE_OBJID(a, b) (((uint64_t)(a)<<32) | (b))
#define DECODE_TYPE(a) (((uint64_t)(a)&(0xffffffffffffffff))>>32)
#define DECODE_NAME(b) ((uint64_t)(b)&(0x00000000ffffffff))
// FP-object key packing additionally carries the element type size (ts)
// in the low 8 bits, with the name shifted up by 8.
#define MAKE_APPROX_OBJID(a, b, ts) (((uint64_t)(a)<<32) | ((b)<<8) | (ts))
#define DECODE_APPROX_TYPE(a) (((uint64_t)(a)&(0xffffffffffffffff))>>32)
#define DECODE_APPROX_NAME(b) (((uint64_t)(b)&(0x00000000ffffff00))>>8)
#define DECODE_APPROX_TYPESZ(c) ((uint64_t)(c)&(0x00000000000000ff))
// Context key packing: context handle (32) | access length (16) | type size (16).
#define MAKE_CNTXT(a, b, c) (((uint64_t)(a)<<32) | ((uint64_t)(b)<<16) | (uint64_t)(c))
#define DECODE_CNTXT(a) (static_cast<ContextHandle_t>((((a)&(0xffffffffffffffff))>>32)))
#define DECODE_ACCLN(b) (((uint64_t)(b)&(0x00000000ffff0000))>>16)
#define DECODE_TYPSZ(c) ((uint64_t)(c)&(0x000000000000ffff))
#define MAX_OBJS_TO_LOG 100
// Reporting threshold: objects with a redundancy fraction below delta are skipped.
#define delta 0.01
#define CACHE_LINE_SIZE (64)
#ifndef PAGE_SIZE
#define PAGE_SIZE (4*1024)
#endif
#define MAX_REDUNDANT_CONTEXTS_TO_LOG (1000)
// maximum cct depth to print
#define MAX_DEPTH 10
// Raw TLS slots used by the inlined sampling counter.
enum {
INSTRACE_TLS_OFFS_BUF_PTR,
INSTRACE_TLS_COUNT, /* total number of TLS slots allocated */
};
static reg_id_t tls_seg;
static uint tls_offs;
// Address of / value stored in a raw TLS slot relative to the TLS segment base.
#define TLS_SLOT(tls_base, enum_val) (void **)((byte *)(tls_base) + tls_offs + (enum_val))
#define BUF_PTR(tls_base, type, offs) *(type **)TLS_SLOT(tls_base, offs)
// 1M
#define MAX_CLONE_INS 1048576
// Per-thread profiling state, attached via drmgr TLS at tls_idx.
typedef struct _per_thread_t {
RedLogMap *INTRedMap; // integer-load redundancy logs
FPRedLogMap *FPRedMap; // floating-point-load redundancy logs
file_t output_file;
void* numInsBuff; // TLS base read/written by the inlined sampling counter
int32_t threadId;
vector<instr_t*> *instr_clones; // cloned instructions, freed at thread exit
} per_thread_t;
// True while the thread's inlined instruction counter is still inside the
// enabled (profiled) part of the sampling window.
#define IS_SAMPLED(pt, WINDOW_ENABLE) ((int64_t)(BUF_PTR(pt->numInsBuff, void, INSTRACE_TLS_OFFS_BUF_PTR))<(int64_t)WINDOW_ENABLE)
file_t gFile; // plain-text report file
FILE* gJson; // JSON report file
// rapidjson document and reusable value nodes for the JSON report.
rapidjson::Document gDoc;
rapidjson::Document::AllocatorType &jsonAllocator = gDoc.GetAllocator();
rapidjson::Value metricOverview(rapidjson::kObjectType);
rapidjson::Value totalIntegerRedundantBytes(rapidjson::kObjectType);
rapidjson::Value totalFloatRedundantBytes(rapidjson::kObjectType);
// threadId -> per-thread JSON metrics node.
std::map<int32_t, rapidjson::Value> threadDetailedMetricsMap;
#ifndef _WERROR
file_t fwarn;
bool warned=false;
#endif
#ifdef ZEROSPY_DEBUG
file_t gDebug;
#endif
// global metrics
uint64_t grandTotBytesLoad = 0;
uint64_t grandTotBytesRedLoad = 0;
uint64_t grandTotBytesApproxRedLoad = 0;
/*******************************************************************************************/
// TODO: May be further optimized by combining size and data hndl to avoid one more mapping
// Merge one integer load's redundancy into the per-thread table.
//   addr   - effective address of the access
//   data   - drcctlib data object owning the address (beg/end range, type,
//            symbol/cct id)
//   value  - number of redundant (zero) bytes observed in this access
//   total  - number of bytes accessed
//   redmap - per-byte redundancy bitmap for this access
//   pt     - per-thread state holding INTRedMap
// always_inline/flatten: called from the hot trace-buffer drain loops.
static inline void AddToRedTable(uint64_t addr, data_handle_t data, uint16_t value, uint16_t total, uint32_t redmap, per_thread_t *pt) __attribute__((always_inline,flatten));
static inline void AddToRedTable(uint64_t addr, data_handle_t data, uint16_t value, uint16_t total, uint32_t redmap, per_thread_t *pt) {
assert(addr<=(uint64_t)data.end_addr);
size_t offset = addr-(uint64_t)data.beg_addr;
size_t size = (uint64_t)data.end_addr - (uint64_t)data.beg_addr;
// key = (object type, symbol/cct id) packed into 64 bits
uint64_t key = MAKE_OBJID(data.object_type,data.sym_name);
RedLogMap::iterator it2 = pt->INTRedMap->find(key);
RedLogSizeMap::iterator it;
// IF_DEBUG(dr_fprintf(
// STDOUT,
// "AddToRedTable 1: offset=%ld, total=%d, size=%ld\n", offset, total, size));
// First access to this (object, size): allocate and initialize a fresh log.
if ( it2 == pt->INTRedMap->end() || (it = it2->second.find(size)) == it2->second.end()) {
RedLogs log;
log.red = value;
#ifdef DEBUG_CHECK
if(offset+total>size) {
printf("AddToRedTable 1: offset=%ld, total=%d, size=%ld\n", offset, total, size);
if(data.object_type == DYNAMIC_OBJECT) {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Dynamic Object: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
drcctlib_print_full_cct(STDOUT, data.sym_name, true, true, MAX_DEPTH);
} else {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Static Object: %s ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", drcctlib_get_str_from_strpool((uint32_t)data.sym_name));
}
}
#endif
bitvec_alloc(&log.redmap, size);
bitvec_and(&log.redmap, redmap, offset, total);
bitvec_alloc(&log.accmap, size);
bitvec_and(&log.accmap, 0, offset, total);
(*pt->INTRedMap)[key][size] = log;
} else {
// Existing log: sizes must be consistent before merging.
assert(it->second.redmap.size==it->second.accmap.size);
assert(size == it->second.redmap.size);
#ifdef DEBUG_CHECK
if(offset+total>size) {
printf("AddToRedTable 2: offset=%ld, total=%d, size=%ld\n", offset, total, size);
if(data.object_type == DYNAMIC_OBJECT) {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Dynamic Object: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
drcctlib_print_full_cct(STDOUT, data.sym_name, true, true, MAX_DEPTH);
} else {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Static Object: %s ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", drcctlib_get_str_from_strpool((uint32_t)data.sym_name));
}
}
#endif
// Accumulate the redundant-byte count and AND the per-access maps into
// the object-lifetime maps (a byte stays "redundant" only if it was
// redundant in every access; accmap marks accessed bytes with 0).
it->second.red += value;
bitvec_and(&(it->second.redmap), redmap, offset, total);
bitvec_and(&(it->second.accmap), 0, offset, total);
}
}
// Merge one floating-point load's redundancy into the per-thread table.
// Same contract as AddToRedTable, plus typesz = FP element width in bytes
// (4 = float, 8 = double), which becomes part of the object key.
static inline void AddToApproximateRedTable(uint64_t addr, data_handle_t data, uint16_t value, uint16_t total, uint64_t redmap, uint32_t typesz, per_thread_t *pt) __attribute__((always_inline,flatten));
static inline void AddToApproximateRedTable(uint64_t addr, data_handle_t data, uint16_t value, uint16_t total, uint64_t redmap, uint32_t typesz, per_thread_t *pt) {
// printf("ADDR=%lx, beg_addr=%lx, end_addr=%lx, typesz=%d, index=%ld, size=%ld\n", addr, (uint64_t)data.beg_addr, (uint64_t)data.end_addr, typesz, addr-(uint64_t)data.beg_addr, (uint64_t)data.end_addr - (uint64_t)data.beg_addr);
assert(addr<=(uint64_t)data.end_addr);
size_t offset = addr-(uint64_t)data.beg_addr;
uint64_t key = MAKE_APPROX_OBJID(data.object_type,data.sym_name, typesz);
FPRedLogMap::iterator it2 = pt->FPRedMap->find(key);
FPRedLogSizeMap::iterator it;
// the data size may not aligned with typesz, so use upper bound as the bitvec size
// Note: not aligned case : struct/class with floating and int.
size_t size = (uint64_t)data.end_addr - (uint64_t)data.beg_addr;
// Sanity: the redundant count can never exceed the accessed count.
if(value > total) {
dr_fprintf(STDERR, "** Warning AddToApproximateTable : value %d, total %d **\n", value, total);
assert(0 && "** BUG #0 Detected. Existing **");
}
// First access to this (object, typesz, size): create a fresh log.
if ( it2 == pt->FPRedMap->end() || (it = it2->second.find(size)) == it2->second.end()) {
FPRedLogs log;
log.red = value;
log.typesz = typesz;
#ifdef DEBUG_CHECK
if(offset+total>size) {
printf("AddToApproxRedTable 1: offset=%ld, total=%d, size=%ld\n", offset, total, size);
if(data.object_type == DYNAMIC_OBJECT) {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Dynamic Object: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
drcctlib_print_full_cct(STDOUT, data.sym_name, true, true, MAX_DEPTH);
} else {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Static Object: %s ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", drcctlib_get_str_from_strpool((uint32_t)data.sym_name));
}
}
#endif
bitvec_alloc(&log.redmap, size);
bitvec_and(&log.redmap, redmap, offset, total);
bitvec_alloc(&log.accmap, size);
bitvec_and(&log.accmap, 0, offset, total);
(*pt->FPRedMap)[key][size] = log;
} else {
assert(it->second.redmap.size==it->second.accmap.size);
assert(size == it->second.redmap.size);
#ifdef DEBUG_CHECK
if(it->second.typesz != typesz) {
printf("it->second.typesz=%d typesz=%d\n", it->second.typesz, typesz);
}
if(offset+total>size) {
printf("AddToApproxRedTable 1: offset=%ld, total=%d, size=%ld\n", offset, total, size);
if(data.object_type == DYNAMIC_OBJECT) {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Dynamic Object: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
drcctlib_print_full_cct(STDOUT, data.sym_name, true, true, MAX_DEPTH);
} else {
printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Static Object: %s ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", drcctlib_get_str_from_strpool((uint32_t)data.sym_name));
}
}
#endif
assert(it->second.typesz == typesz);
// Accumulate and AND-merge, as in AddToRedTable.
it->second.red += value;
bitvec_and(&(it->second.redmap), redmap, offset, total);
bitvec_and(&(it->second.accmap), 0, offset, total);
}
}
/*******************************************************************************************/
// single floating point zero byte counter
// 32-bit float: |sign|exp|mantissa| = | 1 | 8 | 23 |
// the redmap of single floating point takes up 5 bits (1 bit sign, 1 bit exp, 3 bit mantissa)
#define SP_MAP_SIZE 5
// Build the 5-bit redmap of one binary32 value at addr:
// bit4 = sign bit zero, bit3 = exponent byte-part zero,
// bits2..0 = the three mantissa bytes zero.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_fp(void * addr) {
    uint32_t v = *((uint32_t*)addr);
    // Fold each field's bits downward; the masks stop the fold from
    // crossing the sign/exponent/mantissa field boundaries.
    v |= (v>>1) & 0xffbfffff;
    v |= (v>>2) & 0xffdfffff;
    v |= (v>>4) & 0xfff7ffff;
    // Invert and keep one marker bit per field: 31 (sign), 23 (exp),
    // 16/8/0 (mantissa bytes).
    v = (~v) & 0x80810101;
    // Compact the five marker bits into bits 4..0.
    v = v | (v>>7) | (v>>14) | (v>>20) | (v>>27);
    return v & 0x1f;
}
// Cheap pre-filter for binary32: true when the top 7 mantissa bits
// (bits 22..16) are all zero, so the full redmap may be worth computing.
inline __attribute__((always_inline)) bool hasRedundancy_fp(void * addr) {
    const uint32_t bits = *((uint32_t*)addr);
    return 0 == (bits & 0x007f0000);
}
/*******************************************************************************************/
// double floating point zero byte counter
// 64-bit float: |sign|exp|mantissa| = | 1 | 11 | 52 |
// the redmap of single floating point takes up 10 bits (1 bit sign, 2 bit exp, 7 bit mantissa)
#define DP_MAP_SIZE 10
// Build the 10-bit redmap of one binary64 value at addr:
// bit9 = sign zero, bits8..7 = exponent parts zero,
// bits6..0 = the seven low mantissa bytes zero.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_dp(void * addr) {
    uint64_t v = *((uint64_t*)addr);
    // Fold each field's bits downward; masks keep the fold from crossing
    // the sign/exponent/mantissa boundaries.
    v |= (v>>1) & (~0x4008000000000000LL);
    v |= (v>>2) & (~0x200c000000000000LL);
    v |= (v>>4) & (~0x100f000000000000LL);
    // One marker bit per field: 63 (sign), 60/52 (exponent parts),
    // 48,40,...,0 (mantissa bytes).
    v = (~v) & 0x9011010101010101LL;
    // Narrow the seven mantissa markers into bits 6..0.
    uint64_t lo = v & 0x1010101010101LL;
    lo |= lo>>7;
    lo |= lo>>14;
    lo |= lo>>28;
    lo &= 0x7f;
    // Narrow the sign/exponent markers into bits 9..7.
    v = v | (v>>9) | (v>>7);
    v = (v >> 45) & 0x380;
    return lo | v;
}
// Cheap pre-filter for binary64: true when the top 4 mantissa bits
// (bits 51..48) are all zero.
inline __attribute__((always_inline)) bool hasRedundancy_dp(void * addr) {
    const uint64_t bits = *((uint64_t*)addr);
    return 0 == (bits & 0x000f000000000000LL);
}
/***************************************************************************************/
/*********************** floating point full redundancy functions **********************/
/***************************************************************************************/
// Bit-field views of IEEE-754 binary32/binary64 values, used to inspect the
// sign/exponent/mantissa portions of loaded FP values. Field order within
// the bit-field struct depends on host byte order.
#if __BYTE_ORDER == __BIG_ENDIAN
typedef union {
    float f;
    struct {
        uint32_t sign : 1;
        uint32_t exponent : 8;
        uint32_t mantisa : 23;
    } parts;
    struct {
        uint32_t sign : 1;
        uint32_t value : 31; // everything but the sign: 0 iff value is +/-0
    } vars;
} float_cast;
typedef union {
    double f;
    struct {
        uint64_t sign : 1;
        uint64_t exponent : 11;
        uint64_t mantisa : 52;
    } parts;
    struct {
        uint64_t sign : 1;
        uint64_t value : 63; // everything but the sign: 0 iff value is +/-0
    } vars;
} double_cast;
#elif __BYTE_ORDER == __LITTLE_ENDIAN
typedef union {
    float f;
    struct {
        uint32_t mantisa : 23;
        uint32_t exponent : 8;
        uint32_t sign : 1;
    } parts;
    struct {
        uint32_t value : 31;
        uint32_t sign : 1;
    } vars;
} float_cast;
typedef union {
    double f;
    struct {
        uint64_t mantisa : 52;
        uint64_t exponent : 11;
        uint64_t sign : 1;
    } parts;
    struct {
        uint64_t value : 63;
        uint64_t sign : 1;
    } vars;
} double_cast;
#else
// BUGFIX: the diagnostic previously read "Known Byte Order".
#error Unknown Byte Order
#endif
// Compile-time unrolled walk over the bytes [start, end) of one FP access,
// stepping by the element width `incr` (4 = float, 8 = double). Recursion
// terminates in the <end, end, incr> specialization below.
template<int start, int end, int incr>
struct UnrolledConjunctionApprox{
// if the mantisa is 0, the value of the double/float var must be 0
// BodyZeros: count elements whose non-sign bits are all zero (i.e. +/-0).
static __attribute__((always_inline)) uint64_t BodyZeros(uint8_t* addr){
if(incr==4)
return ((*(reinterpret_cast<float_cast*>(&addr[start]))).vars.value==0) + (UnrolledConjunctionApprox<start+incr,end,incr>::BodyZeros(addr));
else if(incr==8)
return ((*(reinterpret_cast<double_cast*>(&addr[start]))).vars.value==0) + (UnrolledConjunctionApprox<start+incr,end,incr>::BodyZeros(addr));
return 0;
}
// BodyRedMap: concatenate each element's FP redmap (5 bits per float,
// 10 bits per double), low element in the low bits.
static __attribute__((always_inline)) uint64_t BodyRedMap(uint8_t* addr){
if(incr==4)
return count_zero_bytemap_fp((void*)(addr+start)) | (UnrolledConjunctionApprox<start+incr,end,incr>::BodyRedMap(addr)<<SP_MAP_SIZE);
else if(incr==8)
return count_zero_bytemap_dp((void*)(addr+start)) | (UnrolledConjunctionApprox<start+incr,end,incr>::BodyRedMap(addr)<<DP_MAP_SIZE);
else
assert(0 && "Not Supportted floating size! now only support for FP32 or FP64.");
return 0;
}
// BodyHasRedundancy: cheap filter — true when any element's top mantissa
// bits are zero, so computing the full redmap may pay off.
static __attribute__((always_inline)) uint64_t BodyHasRedundancy(uint8_t* addr){
if(incr==4)
return hasRedundancy_fp((void*)(addr+start)) || (UnrolledConjunctionApprox<start+incr,end,incr>::BodyHasRedundancy(addr));
else if(incr==8)
return hasRedundancy_dp((void*)(addr+start)) || (UnrolledConjunctionApprox<start+incr,end,incr>::BodyHasRedundancy(addr));
else
assert(0 && "Not Supportted floating size! now only support for FP32 or FP64.");
return 0;
}
};
// Recursion terminator: an empty byte range contributes nothing.
template<int end, int incr>
struct UnrolledConjunctionApprox<end , end , incr>{
static __attribute__((always_inline)) uint64_t BodyZeros(uint8_t* addr){
return 0;
}
static __attribute__((always_inline)) uint64_t BodyRedMap(uint8_t* addr){
return 0;
}
static __attribute__((always_inline)) uint64_t BodyHasRedundancy(uint8_t* addr){
return 0;
}
};
/****************************************************************************************/
// Returns 1 when the byte at addr is zero, 0 otherwise.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int8(uint8_t * addr) {
    uint8_t folded = *addr;
    // OR every bit of the byte down toward bit 0.
    folded = folded | (folded>>1) | (folded>>2) | (folded>>3)
                    | (folded>>4) | (folded>>5) | (folded>>6) | (folded>>7);
    // Bit 0 is now set iff any bit of the byte was set; invert it.
    return (~folded) & 0x1;
}
// Two-byte zero map: bit i of the result is set iff byte i of the 16-bit
// value at addr is zero.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int16(uint8_t * addr) {
    uint16_t folded = *((uint16_t*)addr);
    // OR each byte's bits down toward its bit 0 (bits 0 and 8).
    folded = folded | (folded>>1) | (folded>>2) | (folded>>3)
                    | (folded>>4) | (folded>>5) | (folded>>6) | (folded>>7);
    // Keep one inverted indicator bit per byte.
    uint16_t map = (~folded) & 0x101;
    // Squeeze the two indicator bits into bits 1..0.
    map = map | (map>>7);
    return map & 0x3;
}
// Four-byte zero map: bit i of the result is set iff byte i of the 32-bit
// value at addr is zero.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int32(uint8_t * addr) {
    uint32_t folded = *((uint32_t*)addr);
    // OR each byte's bits down toward its bit 0 (bits 0, 8, 16, 24).
    folded = folded | (folded>>1) | (folded>>2) | (folded>>3)
                    | (folded>>4) | (folded>>5) | (folded>>6) | (folded>>7);
    // Keep one inverted indicator bit per byte.
    uint32_t map = (~folded) & 0x1010101;
    // Gather the four indicator bits into the low nibble.
    map |= map>>7;
    map |= map>>14;
    return map & 0xf;
}
// Eight-byte zero map: bit i of the result is set iff byte i of the 64-bit
// value at addr is zero.
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int64(uint8_t * addr) {
    uint64_t folded = *((uint64_t*)addr);
    // OR each byte's bits down toward its bit 0 (bits 0, 8, ..., 56).
    folded = folded | (folded>>1) | (folded>>2) | (folded>>3)
                    | (folded>>4) | (folded>>5) | (folded>>6) | (folded>>7);
    // Keep one inverted indicator bit per byte.
    uint64_t map = (~folded) & 0x101010101010101LL;
    // Gather the eight indicator bits into the low byte.
    map |= map>>7;
    map |= map>>14;
    map |= map>>28;
    return map & 0xff;
}
#ifdef USE_SIMD
// Broadcast constant 0x01 per byte for the andnot step of the SIMD reducers.
uint8_t mask[64] __attribute__((aligned(64))) = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 };
// Byte-shuffle control: pick byte 0 of each 64-bit lane, 0xff clears the rest.
uint8_t mask_shuf[32] __attribute__((aligned(64))) = {
0x00, 0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// SSE variant: 16-byte zero map (one bit per byte) of the 128-bit value at
// addr, returned in the low 16 bits. Requires SSSE3 (_mm_shuffle_epi8).
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int128(uint8_t * addr) {
uint64_t xx;
__m128i mmx, tmp;
// load 128-bit val
mmx = _mm_loadu_si128((__m128i*)addr);
// Merge all bits within a byte in parallel
// 0x
tmp = _mm_srli_epi64 (mmx, 1);
mmx = _mm_or_si128(mmx, tmp);
// 00xx
tmp = _mm_srli_epi64 (mmx, 2);
mmx = _mm_or_si128(mmx, tmp);
// 0000xxxx
tmp = _mm_srli_epi64 (mmx, 4);
mmx = _mm_or_si128(mmx, tmp);
// x = (~x) & broadcast(0x01)
// the mask is already aligned
tmp = _mm_load_si128((__m128i*)mask);
mmx = _mm_andnot_si128(mmx, tmp);
/* Now SIMD reg_val contains the collected bitmap for each byte, we now
narrow them into each 64-bit element in this packed SIMD register*/
// x = (x>>7 | x)
tmp = _mm_srli_epi64 (mmx, 7);
mmx = _mm_or_si128(mmx, tmp);
// x = (x>>14 | x)
tmp = _mm_srli_epi64 (mmx, 14);
mmx = _mm_or_si128(mmx, tmp);
// x = (x>>28 | x)
tmp = _mm_srli_epi64 (mmx, 28);
mmx = _mm_or_si128(mmx, tmp);
/* After narrowed them by 64-bit elementwise merging, the lowest byte of
each element contains the collected redmap, so we can now narrow them
by select (bytewise permutation).*/
// x = permuteb(x, {0,8,...})
// shuffle: [...clear...] [72:64] [8:0]
// We directly load the simd mask from memory
tmp = _mm_load_si128((__m128i*)mask_shuf);
mmx = _mm_shuffle_epi8(mmx, tmp);
// now store the lower 16-bits into target (INOUT) register
union U128I {
__m128i v;
uint16_t e[8];
} cast;
cast.v = mmx;
xx = (uint64_t)cast.e[0];
return xx;
}
// AVX2 variant: 32-byte zero map of the 256-bit value at addr, returned in
// the low 32 bits. Requires AVX2 (_mm256_permute4x64_epi64).
inline __attribute__((always_inline)) uint64_t count_zero_bytemap_int256(uint8_t * addr) {
uint64_t xx;
__m256i mmx, tmp;
// Load data from memory via SIMD instruction
mmx = _mm256_loadu_si256((__m256i*)addr);
// Merge all bits within a byte in parallel
// 0x
tmp = _mm256_srli_epi64(mmx, 1);
mmx = _mm256_or_si256(mmx, tmp);
// 00xx
tmp = _mm256_srli_epi64(mmx, 2);
mmx = _mm256_or_si256(mmx, tmp);
// 0000xxxx
tmp = _mm256_srli_epi64(mmx, 4);
mmx = _mm256_or_si256(mmx, tmp);
// x = (~x) & broadcast(0x01)
tmp = _mm256_load_si256((__m256i*)mask);
mmx = _mm256_andnot_si256(mmx, tmp);
/* Now SIMD reg_val contains the collected bitmap for each byte, we now
narrow them into each 64-bit element in this packed SIMD register*/
// x = (x>>7 | x)
tmp = _mm256_srli_epi64 (mmx, 7);
mmx = _mm256_or_si256(mmx, tmp);
// x = (x>>14 | x)
tmp = _mm256_srli_epi64 (mmx, 14);
mmx = _mm256_or_si256(mmx, tmp);
// x = (x>>28 | x)
tmp = _mm256_srli_epi64 (mmx, 28);
mmx = _mm256_or_si256(mmx, tmp);
/* After narrowed them by 64-bit elementwise merging, the lowest byte of
each element contains the collected redmap, so we can now narrow them
by select (bytewise permutation).*/
// x = permuteb(x, {0,8,...})
// shuffle: [...clear...] [200:192] [136:128] | [...clear...] [72:64] [8:0]
// We directly load the simd mask from memory
tmp = _mm256_load_si256((__m256i*)mask_shuf);
mmx = _mm256_shuffle_epi8(mmx, tmp);
// As shuffle is performed per lane, so we need further merging
// 1. permutation to merge two lanes into the first lane: 8 = (10) (00) -> [...] [192:128] [64:0]
mmx = _mm256_permute4x64_epi64(mmx, 8);
// 2. shuffle again for narrowing into lower 64-bit value, here we reuse the previously loaded mask in simd scratch register
mmx = _mm256_shuffle_epi8(mmx, tmp);
// now store the lower 32-bits into target (INOUT) register
union U256I {
__m256i v;
uint32_t e[8];
} cast;
cast.v = mmx;
xx = (uint64_t)cast.e[0];
return xx;
}
#endif
// Lookup tables indexed by a per-byte zero bitmap (MSB of the index = most
// significant byte of the element). Each entry is the number of contiguous
// redundant (zero) bytes counted from the most significant byte downward:
// e.g. BitCountTable16[0b1100] = 2, BitCountTable16[0b1111] = 4.
// Table4 serves 2-bit maps, Table8 3-bit maps, Table16 4-bit maps,
// Table256 8-bit maps.
static const unsigned char BitCountTable4[] __attribute__ ((aligned(64))) = {
0, 0, 1, 2
};
static const unsigned char BitCountTable8[] __attribute__ ((aligned(64))) = {
0, 0, 0, 0, 1, 1, 2, 3
};
static const unsigned char BitCountTable16[] __attribute__ ((aligned(64))) = {
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 4
};
static const unsigned char BitCountTable256[] __attribute__ ((aligned(64))) = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8
};
// accessLen & eleSize are size in bits
// accessLen & eleSize are size in bits
// Compile-time recursive pretty-printer for integer redmaps: each map bit
// becomes "00" (redundant/zero byte) or "XX" (non-zero byte), elements of
// EleSize bits separated by " , ", printed most significant bit first.
template<uint32_t AccessLen, uint32_t EleSize>
struct RedMapString {
static __attribute__((always_inline)) string getIntRedMapString(uint64_t redmap) {
// static_assert(AccessLen % EleSize == 0);
string buff =
RedMapString<AccessLen-EleSize, EleSize>::getIntRedMapString(redmap>>(AccessLen-EleSize)) +
" , " + RedMapString<EleSize, EleSize>::getIntRedMapString(redmap);
return buff;
}
};
// Single-element case: render AccessLen bits, high bit first.
template<uint32_t AccessLen>
struct RedMapString <AccessLen, AccessLen> {
static __attribute__((always_inline)) string getIntRedMapString(uint64_t redmap) {
string buff = "";
buff += ((redmap>>(AccessLen-1))&0x1) ? "00 " : "XX ";
buff += RedMapString<AccessLen-1, AccessLen-1>::getIntRedMapString(redmap>>1);
return buff;
}
};
// Recursion terminator: a single bit.
template<>
struct RedMapString <1, 1> {
static __attribute__((always_inline)) string getIntRedMapString(uint64_t redmap) {
return string((redmap&0x1) ? "00" : "XX");
}
};
// Render one FP element's redmap as "sign | exponent | mantissa", using
// n_exp exponent map bits and n_man mantissa map bits (plus 1 sign bit).
template<uint32_t n_exp, uint32_t n_man>
inline __attribute__((always_inline)) string __getFpRedMapString(uint64_t redmap) {
    string buff = "";
    const uint32_t signPos = n_exp + n_man;
    buff += RedMapString<1,1>::getIntRedMapString(redmap>>signPos) + " | ";
    buff += RedMapString<n_exp,n_exp>::getIntRedMapString(redmap>>n_man) + " | ";
    buff += RedMapString<n_man,n_man>::getIntRedMapString(redmap);
    return buff;
}
// Render a packed FP redmap of accessLen total bits, one element
// ((1 + n_exp + n_man) bits) at a time, higher elements first, separated
// by " , ". (Cleanup: removed an unused local and an unreachable return.)
template<uint32_t n_exp, uint32_t n_man>
string getFpRedMapString(uint64_t redmap, uint64_t accessLen) {
    const uint64_t newAccessLen = accessLen - (n_exp + n_man + 1);
    if(newAccessLen==0) {
        // Last (or only) element.
        return __getFpRedMapString<n_exp,n_man>(redmap);
    }
    // Render the higher elements first, then this element's low bits.
    return getFpRedMapString<n_exp,n_man>(redmap>>newAccessLen, newAccessLen) + " , " + __getFpRedMapString<n_exp,n_man>(redmap);
}
// float: 1 sign + 1 exp + 3 mantissa map bits = 5 bits per element.
#define getFpRedMapString_SP(redmap, num) getFpRedMapString<1,3>(redmap, num*5)
// double: 1 sign + 2 exp + 7 mantissa map bits = 10 bits per element.
#define getFpRedMapString_DP(redmap, num) getFpRedMapString<2,7>(redmap, num*10)
template<int start, int end, int incr>
struct UnrolledConjunction{
    // Compile-time unrolled walker over the byte span [start, end) in steps of
    // `incr` bytes, where `incr` is the integer element size (1, 2, 4 or 8).
    // All branches on `incr` are resolved at compile time (always_inline).
    //
    // BodyRedNum: count fully-redundant elements from the per-byte zero bitmap
    // `rmap` (bit i set <=> byte i of the access is zero).  The BitCountTable*
    // lookups collapse each element's group of `incr` bits into its count.
    // if the mantisa is 0, the value of the double/float var must be 0
    static __attribute__((always_inline)) uint64_t BodyRedNum(uint64_t rmap){
        // static_assert(start < end);
        if(incr==1)
            return ((start==0) ? (rmap&0x1) : ((rmap>>start)&0x1)) + (UnrolledConjunction<start+incr,end,incr>::BodyRedNum(rmap));
        else if(incr==2)
            return ((start==0) ? BitCountTable8[rmap&0x3] : BitCountTable8[(rmap>>start)&0x3]) + (UnrolledConjunction<start+incr,end,incr>::BodyRedNum(rmap));
        else if(incr==4)
            return ((start==0) ? BitCountTable16[rmap&0xf] : BitCountTable16[(rmap>>start)&0xf]) + (UnrolledConjunction<start+incr,end,incr>::BodyRedNum(rmap));
        else if(incr==8)
            return ((start==0) ? BitCountTable256[rmap&0xff] : BitCountTable256[(rmap>>start)&0xff]) + (UnrolledConjunction<start+incr,end,incr>::BodyRedNum(rmap));
        return 0;
    }
    // BodyRedMap: build the per-byte zero bitmap for the bytes at `addr`,
    // one element at a time, shifting each element's partial map into place.
    static __attribute__((always_inline)) uint64_t BodyRedMap(uint8_t* addr){
        // static_assert(start < end);
        if(incr==1)
            return count_zero_bytemap_int8(addr+start) | (UnrolledConjunction<start+incr,end,incr>::BodyRedMap(addr)<<1);
        else if(incr==2)
            return count_zero_bytemap_int16(addr+start) | (UnrolledConjunction<start+incr,end,incr>::BodyRedMap(addr)<<2);
        else if(incr==4)
            return count_zero_bytemap_int32(addr+start) | (UnrolledConjunction<start+incr,end,incr>::BodyRedMap(addr)<<4);
        else if(incr==8)
            return count_zero_bytemap_int64(addr+start) | (UnrolledConjunction<start+incr,end,incr>::BodyRedMap(addr)<<8);
        else
            assert(0 && "Not Supportted integer size! now only support for INT8, INT16, INT32 or INT64.");
        return 0;
    }
    // BodyHasRedundancy: cheap pre-test — true when any element in the span
    // has a zero most-significant byte (little-endian: highest address of the
    // element), i.e. when at least one element carries leading-zero redundancy.
    static __attribute__((always_inline)) bool BodyHasRedundancy(uint8_t* addr){
        if(incr==1)
            return (addr[start]==0) || (UnrolledConjunction<start+incr,end,incr>::BodyHasRedundancy(addr));
        else if(incr==2)
            // BUGFIX: recurse into BodyHasRedundancy (was BodyRedMap), which both
            // computed the whole bitmap of the remainder and tested the wrong
            // predicate (any zero byte instead of any zero MSB).
            return (((*((uint16_t*)(&addr[start])))&0xff00)==0) || (UnrolledConjunction<start+incr,end,incr>::BodyHasRedundancy(addr));
        else if(incr==4)
            return (((*((uint32_t*)(&addr[start])))&0xff000000)==0) || (UnrolledConjunction<start+incr,end,incr>::BodyHasRedundancy(addr));
        else if(incr==8)
            return (((*((uint64_t*)(&addr[start])))&0xff00000000000000LL)==0) || (UnrolledConjunction<start+incr,end,incr>::BodyHasRedundancy(addr));
        else
            assert(0 && "Not Supportted integer size! now only support for INT8, INT16, INT32 or INT64.");
        return 0;
    }
};
template<int end, int incr>
struct UnrolledConjunction<end , end , incr>{
    // Recursion terminator for the empty span [end, end): no redundant
    // elements, an empty bitmap, and no redundancy.
    static __attribute__((always_inline)) uint64_t BodyRedNum(uint64_t rmap){
        return 0;
    }
    static __attribute__((always_inline)) uint64_t BodyRedMap(uint8_t* addr){
        return 0;
    }
    // CONSISTENCY FIX: return bool to match the primary template's
    // declaration of BodyHasRedundancy (was uint64_t).
    static __attribute__((always_inline)) bool BodyHasRedundancy(uint8_t* addr){
        return false;
    }
};
/*******************************************************************************************/
// use manual inlined updates
#define RESERVE_AFLAGS(dc, bb, ins) assert(drreg_reserve_aflags (dc, bb, ins)==DRREG_SUCCESS)
#define UNRESERVE_AFLAGS(dc, bb, ins) assert(drreg_unreserve_aflags (dc, bb, ins)==DRREG_SUCCESS)
#define RESERVE_REG(dc, bb, instr, vec, reg) do {\
if (drreg_reserve_register(dc, bb, instr, vec, ®) != DRREG_SUCCESS) { \
ZEROSPY_EXIT_PROCESS("ERROR @ %s:%d: drreg_reserve_register != DRREG_SUCCESS", __FILE__, __LINE__); \
} } while(0)
#define UNRESERVE_REG(dc, bb, instr, reg) do { \
if (drreg_unreserve_register(dc, bb, instr, reg) != DRREG_SUCCESS) { \
ZEROSPY_EXIT_PROCESS("ERROR @ %s:%d: drreg_unreserve_register != DRREG_SUCCESS", __FILE__, __LINE__); \
} } while(0)
# ifdef ARM_CCTLIB
# define DRCCTLIB_LOAD_IMM32_0(dc, Rt, imm) \
INSTR_CREATE_movz((dc), (Rt), (imm), OPND_CREATE_INT(0))
# define DRCCTLIB_LOAD_IMM32_16(dc, Rt, imm) \
INSTR_CREATE_movk((dc), (Rt), (imm), OPND_CREATE_INT(16))
# define DRCCTLIB_LOAD_IMM32_32(dc, Rt, imm) \
INSTR_CREATE_movk((dc), (Rt), (imm), OPND_CREATE_INT(32))
# define DRCCTLIB_LOAD_IMM32_48(dc, Rt, imm) \
INSTR_CREATE_movk((dc), (Rt), (imm), OPND_CREATE_INT(48))
static inline void
minstr_load_wint_to_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
                        int32_t wint_num)
{
    // Materialize a 32-bit immediate into `reg`: movz the low half-word
    // (zeroing the rest of the register), then movk the high half-word
    // only when it is non-zero.
    uint32_t lo_half = (uint32_t)wint_num & 0xffff;
    uint32_t hi_half = ((uint32_t)wint_num >> 16) & 0xffff;
    MINSERT(ilist, where,
            DRCCTLIB_LOAD_IMM32_0(drcontext, opnd_create_reg(reg),
                                  OPND_CREATE_IMMEDIATE_INT(lo_half)));
    if (hi_half != 0) {
        MINSERT(ilist, where,
                DRCCTLIB_LOAD_IMM32_16(drcontext, opnd_create_reg(reg),
                                       OPND_CREATE_IMMEDIATE_INT(hi_half)));
    }
}
#ifdef ARM64_CCTLIB
static inline void
minstr_load_wwint_to_reg(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg, uint64_t wwint_num)
{
MINSERT(ilist, where,
DRCCTLIB_LOAD_IMM32_0(drcontext, opnd_create_reg(reg),
OPND_CREATE_IMMEDIATE_INT(wwint_num & 0xffff)));
uint64_t tmp = (wwint_num >> 16) & 0xffff;
if(tmp) {
MINSERT(ilist, where,
DRCCTLIB_LOAD_IMM32_16(drcontext, opnd_create_reg(reg),
OPND_CREATE_IMMEDIATE_INT(tmp)));
}
tmp = (wwint_num >> 32) & 0xffff;
if(tmp) {
MINSERT(ilist, where,
DRCCTLIB_LOAD_IMM32_32(drcontext, opnd_create_reg(reg),
OPND_CREATE_IMMEDIATE_INT(tmp)));
}
tmp = (wwint_num >> 48) & 0xffff;
if(tmp) {
MINSERT(ilist, where,
DRCCTLIB_LOAD_IMM32_48(drcontext, opnd_create_reg(reg),
OPND_CREATE_IMMEDIATE_INT(tmp)));
}
}
#endif
# endif
// sample wrapper for sampling without code flush
#if 0
#define SAMPLED_CLEAN_CALL(insert_clean_call) do { \
bool dead; \
reg_id_t reg_ptr; \
RESERVE_AFLAGS(dead, bb, ins); \
assert(drreg_reserve_register(drcontext, bb, ins, NULL, ®_ptr)==DRREG_SUCCESS);
dr_insert_read_raw_tls(drcontext, bb, ins, tls_seg, tls_offs + INSTRACE_TLS_OFFS_BUF_PTR, reg_ptr);\
minstr_load_wint_to_reg(drcontext, ilist, where, reg_val, window_disable);\
MINSERT(bb, ins, XINST_CREATE_cmp(drcontext, opnd_create_reg(reg_ptr), OPND_CREATE_INT32(window_enable))); \
instr_t* restore = INSTR_CREATE_label(drcontext); \
MINSERT(bb, ins, XINST_CREATE_jump_cond(drcontext, DR_PRED_LE, opnd_create_instr(restore))); \
{ insert_clean_call; } \
MINSERT(bb, ins, restore); \
assert(drreg_unreserve_register(drcontext, bb, ins, reg_ptr)==DRREG_SUCCESS);
UNRESERVE_AFLAGS(dead, bb, ins); \
} while(0)
#endif
#define SAMPLED_CLEAN_CALL(insert_clean_call) insert_clean_call
// This clean call will be automatically inlined by -opt_cleancall as it only has one argument
struct BBInstrument {
    // Clean call: advance the per-thread executed-instruction counter (kept in
    // the raw TLS slot INSTRACE_TLS_OFFS_BUF_PTR) by this basic block's size.
    static void BBUpdate(int insCnt) {
        void *drcontext = dr_get_current_drcontext();
        per_thread_t *pt = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
        uint64_t counter =
            reinterpret_cast<uint64_t>(BUF_PTR(pt->numInsBuff, void, INSTRACE_TLS_OFFS_BUF_PTR));
        counter += insCnt;
        BUF_PTR(pt->numInsBuff, void, INSTRACE_TLS_OFFS_BUF_PTR) =
            reinterpret_cast<void *>(counter);
    }
    // Same as BBUpdate, but wraps the counter back below window_disable when
    // it crosses the sampling-window boundary.
    static void BBUpdateAndCheck(int insCnt) {
        void *drcontext = dr_get_current_drcontext();
        per_thread_t *pt = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
        uint64_t counter =
            reinterpret_cast<uint64_t>(BUF_PTR(pt->numInsBuff, void, INSTRACE_TLS_OFFS_BUF_PTR));
        counter += insCnt;
        if (counter >= (uint64_t)window_disable) {
            counter -= (uint64_t)window_disable;
        }
        BUF_PTR(pt->numInsBuff, void, INSTRACE_TLS_OFFS_BUF_PTR) =
            reinterpret_cast<void *>(counter);
    }
};
template<int accessLen, int elementSize>
inline __attribute__((always_inline))
void CheckAndInsertIntPage_impl(void* drcontext, void* addr, void* pval, per_thread_t *pt) {
    // Record the zero-byte redundancy of one integer access of `accessLen`
    // bytes (elements of `elementSize` bytes) whose value bytes were captured
    // into `pval`.  Accesses that do not hit a tracked dynamic/static data
    // object are ignored.
    // update info
    uint8_t* bytes = reinterpret_cast<uint8_t*>(pval);
    data_handle_t data_hndl =
        drcctlib_get_data_hndl_ignore_stack_data(drcontext, (app_pc)addr);
    if(data_hndl.object_type!=DYNAMIC_OBJECT && data_hndl.object_type!=STATIC_OBJECT) {
        return ;
    }
    if(bytes[accessLen-1]!=0) {
        // Most-significant byte of the whole access is non-zero: record the
        // access with zero redundancy and an empty bitmap.
        // the log have already been clear to 0, so we do nothing here and quick return.
        AddToRedTable((uint64_t) addr, data_hndl, 0, accessLen, 0, pt);
        return ;
    }
    // Per-byte zero bitmap: bit i set <=> byte i of the access is zero.
    // BUGFIX: initialized to 0 so an (unexpected) fall-through of the default
    // case under NDEBUG cannot read an indeterminate value.
    uint64_t redByteMap = 0;
    switch(accessLen) {
        case 1:
            redByteMap = count_zero_bytemap_int8(bytes);
            break;
        case 2:
            redByteMap = count_zero_bytemap_int16(bytes);
            break;
        case 4:
            redByteMap = count_zero_bytemap_int32(bytes);
            break;
        case 8:
            redByteMap = count_zero_bytemap_int64(bytes);
            break;
        case 16:
#ifdef USE_SIMD
            redByteMap = count_zero_bytemap_int128(bytes);
#else
            redByteMap = count_zero_bytemap_int64(bytes) |
                        (count_zero_bytemap_int64(bytes+8)<<8);
#endif
            break;
        case 32:
#ifdef USE_SIMD
            redByteMap = count_zero_bytemap_int256(bytes);
#else
            redByteMap = count_zero_bytemap_int64(bytes) |
                        (count_zero_bytemap_int64(bytes+8)<<8) |
                        (count_zero_bytemap_int64(bytes+16)<<16) |
                        (count_zero_bytemap_int64(bytes+24)<<24);
#endif
            break;
        default:
            assert(0 && "UNKNOWN ACCESSLEN!\n");
    }
#ifdef USE_SSE
    // BUGFIX: redZero must live in the enclosing scope; it was previously
    // declared inside each branch and was out of scope at the AddToRedTable
    // call, so this path failed to compile when USE_SSE was enabled.
    uint64_t redZero;
    if(elementSize==1) {
        // One-byte elements: every zero byte is a fully redundant element.
        redZero = _mm_popcnt_u64(redByteMap);
    } else {
        // accessLen == elementSize: count the leading zero bytes of the single
        // element.  Invert the map (bit set <=> byte non-zero), restrict it to
        // accessLen bits, then lzcnt and rebase from 64-bit to accessLen bits.
        uint64_t nonZeroMask = (~redByteMap) & ((1ULL<<accessLen)-1);
        redZero = _lzcnt_u64(nonZeroMask) - (64-accessLen);
    }
    AddToRedTable((uint64_t)addr, data_hndl, redZero, accessLen, redByteMap, pt);
#else
    uint64_t redZero = UnrolledConjunction<0, accessLen, elementSize>::BodyRedNum(redByteMap);
    AddToRedTable((uint64_t)addr, data_hndl, redZero, accessLen, redByteMap, pt);
#endif
}
template<int sz, int esize>
void trace_update_int() {
void* drcontext = dr_get_current_drcontext();
// here we don't need to pass in the trace buffer pointer as we can statically know
// which buffer will be updated at compile time.
trace_buf_t* trace_buffer;
switch (sz) {
case 1:
trace_buffer = trace_buffer_i1; break;
case 2:
trace_buffer = trace_buffer_i2; break;
case 4:
trace_buffer = trace_buffer_i4; break;
case 8:
trace_buffer = trace_buffer_i8; break;
case 16:
trace_buffer = trace_buffer_i16; break;
case 32:
trace_buffer = trace_buffer_i32; break;
}
void* buf_base = trace_buf_get_buffer_base(drcontext, trace_buffer);
void* buf_ptr = trace_buf_get_buffer_ptr(drcontext, trace_buffer);
per_thread_t* pt = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
cache_t<sz> *trace_base = (cache_t<sz> *)(char *)buf_base;
cache_t<sz> *trace_ptr = (cache_t<sz> *)((char *)buf_ptr);
cache_t<sz> *cache_ptr;
IF_DEBUG(dr_fprintf(
STDOUT,
"UPDATE INT: trace_ptr=%p, base=%p, end=%p, size=%ld, buf_size=%ld, buf_end=%p\n",
trace_ptr, trace_base,
(char *)trace_base + trace_buf_get_buffer_size(drcontext, trace_buffer),
trace_ptr - trace_base, trace_buf_get_buffer_size(drcontext, trace_buffer),