Skip to content

Commit 77ff614

Browse files
executor: decouple kcov memory allocation from the trace
On different platforms, and in different coverage collection modes, the pointer to the beginning of the kcov buffer may or may not differ from the pointer to the region returned by mmap(). Decouple these two pointers: the memory is now always allocated and deallocated via cov->mmap_alloc_ptr and cov->mmap_alloc_size, while the buffer itself is accessed via cov->data and cov->data_size. I tried my best not to break Darwin and BSD, but I did not test them.
1 parent 0931f9b commit 77ff614

File tree

5 files changed

+52
-37
lines changed

5 files changed

+52
-37
lines changed

executor/executor.cc

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -348,8 +348,16 @@ struct call_t {
348348
struct cover_t {
349349
int fd;
350350
uint32 size;
351+
// mmap_alloc_ptr is the internal pointer to KCOV mapping, possibly with guard pages.
352+
// It is only used to allocate/deallocate the buffer of mmap_alloc_size.
353+
char* mmap_alloc_ptr;
351354
uint32 mmap_alloc_size;
355+
// data is the pointer to the kcov buffer containing the recorded PCs.
356+
// data may differ from mmap_alloc_ptr.
352357
char* data;
358+
// data_size is set by cover_open(). This is the requested kcov buffer size.
359+
uint32 data_size;
360+
// data_end is simply data + data_size.
353361
char* data_end;
354362
// Currently collecting comparisons.
355363
bool collect_comps;

executor/executor_bsd.h

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ static void cover_open(cover_t* cov, bool extra)
7171
#if GOOS_freebsd
7272
if (ioctl(cov->fd, KIOSETBUFSIZE, kCoverSize))
7373
fail("ioctl init trace write failed");
74-
cov->mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
74+
cov->data_size = kCoverSize * KCOV_ENTRY_SIZE;
7575
#elif GOOS_openbsd
7676
unsigned long cover_size = kCoverSize;
7777
if (ioctl(cov->fd, KIOSETBUFSIZE, &cover_size))
@@ -83,7 +83,7 @@ static void cover_open(cover_t* cov, bool extra)
8383
if (ioctl(cov->fd, KIOREMOTEATTACH, &args))
8484
fail("ioctl remote attach failed");
8585
}
86-
cov->mmap_alloc_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
86+
cov->data_size = kCoverSize * (is_kernel_64_bit ? 8 : 4);
8787
#elif GOOS_netbsd
8888
uint64_t cover_size;
8989
if (extra) {
@@ -100,18 +100,20 @@ static void cover_open(cover_t* cov, bool extra)
100100
if (ioctl(cov->fd, KCOV_IOC_SETBUFSIZE, &cover_size))
101101
fail("ioctl init trace write failed");
102102
}
103-
cov->mmap_alloc_size = cover_size * KCOV_ENTRY_SIZE;
103+
cov->data_size = cover_size * KCOV_ENTRY_SIZE;
104104
#endif
105105
}
106106

107107
static void cover_mmap(cover_t* cov)
108108
{
109-
if (cov->data != NULL)
109+
if (cov->mmap_alloc_ptr != NULL)
110110
fail("cover_mmap invoked on an already mmapped cover_t object");
111+
cov->mmap_alloc_size = cov->data_size;
111112
void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
112113
MAP_SHARED, cov->fd, 0);
113114
if (mmap_ptr == MAP_FAILED)
114115
fail("cover mmap failed");
116+
cov->mmap_alloc_ptr = (char*)mmap_ptr;
115117
cov->data = (char*)mmap_ptr;
116118
cov->data_end = cov->data + cov->mmap_alloc_size;
117119
cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
@@ -120,13 +122,13 @@ static void cover_mmap(cover_t* cov)
120122

121123
static void cover_protect(cover_t* cov)
122124
{
123-
if (cov->data == NULL)
125+
if (cov->mmap_alloc_ptr == NULL)
124126
fail("cover_protect invoked on an unmapped cover_t object");
125127
#if GOOS_freebsd
126128
size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
127129
long page_size = sysconf(_SC_PAGESIZE);
128130
if (page_size > 0)
129-
mprotect(cov->data + page_size, mmap_alloc_size - page_size,
131+
mprotect(cov->mmap_alloc_ptr + page_size, mmap_alloc_size - page_size,
130132
PROT_READ);
131133
#elif GOOS_openbsd
132134
int mib[2], page_size;
@@ -135,20 +137,20 @@ static void cover_protect(cover_t* cov)
135137
mib[1] = HW_PAGESIZE;
136138
size_t len = sizeof(page_size);
137139
if (sysctl(mib, ARRAY_SIZE(mib), &page_size, &len, NULL, 0) != -1)
138-
mprotect(cov->data + page_size, mmap_alloc_size - page_size, PROT_READ);
140+
mprotect(cov->mmap_alloc_ptr + page_size, mmap_alloc_size - page_size, PROT_READ);
139141
#endif
140142
}
141143

142144
static void cover_unprotect(cover_t* cov)
143145
{
144-
if (cov->data == NULL)
146+
if (cov->mmap_alloc_ptr == NULL)
145147
fail("cover_unprotect invoked on an unmapped cover_t object");
146148
#if GOOS_freebsd
147149
size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
148-
mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
150+
mprotect(cov->mmap_alloc_ptr, mmap_alloc_size, PROT_READ | PROT_WRITE);
149151
#elif GOOS_openbsd
150152
size_t mmap_alloc_size = kCoverSize * sizeof(uintptr_t);
151-
mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
153+
mprotect(cov->mmap_alloc_ptr, mmap_alloc_size, PROT_READ | PROT_WRITE);
152154
#endif
153155
}
154156

executor/executor_darwin.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)
7373

7474
static void cover_mmap(cover_t* cov)
7575
{
76-
if (cov->data != NULL)
76+
if (cov->mmap_alloc_ptr != NULL)
7777
fail("cover_mmap invoked on an already mmapped cover_t object");
7878
uintptr_t mmap_ptr = 0;
7979
if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
@@ -84,6 +84,7 @@ static void cover_mmap(cover_t* cov)
8484
if (cov->mmap_alloc_size > kCoverSize)
8585
fail("mmap allocation size larger than anticipated");
8686

87+
cov->mmap_alloc_ptr = (char*)mmap_ptr;
8788
cov->data = (char*)mmap_ptr;
8889
cov->data_end = cov->data + cov->mmap_alloc_size;
8990
}

executor/executor_linux.h

Lines changed: 20 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ static void cover_open(cover_t* cov, bool extra)
114114
: kCoverSize;
115115
if (ioctl(cov->fd, kcov_init_trace, cover_size))
116116
fail("cover init trace write failed");
117-
cov->mmap_alloc_size = cover_size * (is_kernel_64_bit ? 8 : 4);
117+
cov->data_size = cover_size * (is_kernel_64_bit ? 8 : 4);
118118
if (pkeys_enabled)
119119
debug("pkey protection enabled\n");
120120
}
@@ -133,35 +133,37 @@ static void cover_unprotect(cover_t* cov)
133133

134134
static void cover_mmap(cover_t* cov)
135135
{
136-
if (cov->data != NULL)
136+
if (cov->mmap_alloc_ptr != NULL)
137137
fail("cover_mmap invoked on an already mmapped cover_t object");
138-
if (cov->mmap_alloc_size == 0)
138+
if (cov->data_size == 0)
139139
fail("cover_t structure is corrupted");
140140
// Allocate kcov buffer plus two guard pages surrounding it.
141-
char* mapped = (char*)mmap(NULL, cov->mmap_alloc_size + 2 * SYZ_PAGE_SIZE,
142-
PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
143-
if (mapped == MAP_FAILED)
141+
cov->mmap_alloc_size = cov->data_size + 2 * SYZ_PAGE_SIZE;
142+
cov->mmap_alloc_ptr = (char*)mmap(NULL, cov->mmap_alloc_size,
143+
PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
144+
if (cov->mmap_alloc_ptr == MAP_FAILED)
144145
exitf("failed to preallocate kcov buffer");
145146
// Now map the kcov buffer to the file, overwriting the existing mapping above.
146147
int prot = flag_read_only_coverage ? PROT_READ : (PROT_READ | PROT_WRITE);
147-
cov->data = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
148-
prot, MAP_SHARED | MAP_FIXED, cov->fd, 0);
149-
if (cov->data == MAP_FAILED)
148+
void* data_buf = (char*)mmap(cov->mmap_alloc_ptr + SYZ_PAGE_SIZE, cov->data_size,
149+
prot, MAP_SHARED | MAP_FIXED, cov->fd, 0);
150+
if (data_buf == MAP_FAILED)
150151
exitf("cover mmap failed");
151-
if (pkeys_enabled && pkey_mprotect(cov->data, cov->mmap_alloc_size, prot, RESERVED_PKEY))
152+
if (pkeys_enabled && pkey_mprotect(data_buf, cov->data_size, prot, RESERVED_PKEY))
152153
exitf("failed to pkey_mprotect kcov buffer");
153-
cov->data_end = cov->data + cov->mmap_alloc_size;
154+
cov->data = (char*)data_buf;
155+
cov->data_end = cov->data + cov->data_size;
154156
cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
155157
cov->pc_offset = 0;
156158
}
157159

158160
static void cover_munmap(cover_t* cov)
159161
{
160-
if (cov->data == NULL)
162+
if (cov->mmap_alloc_ptr == NULL)
161163
fail("cover_munmap invoked on a non-mmapped cover_t object");
162-
if (munmap(cov->data - SYZ_PAGE_SIZE, cov->mmap_alloc_size + 2 * SYZ_PAGE_SIZE))
164+
if (munmap(cov->mmap_alloc_ptr, cov->mmap_alloc_size))
163165
fail("cover_munmap failed");
164-
cov->data = NULL;
166+
cov->mmap_alloc_ptr = NULL;
165167
}
166168

167169
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
@@ -303,8 +305,8 @@ static const char* setup_delay_kcov()
303305
cov.fd = kCoverFd;
304306
cover_open(&cov, false);
305307
cover_mmap(&cov);
306-
char* first = cov.data;
307-
cov.data = nullptr;
308+
char* first = cov.mmap_alloc_ptr;
309+
cov.mmap_alloc_ptr = nullptr;
308310
cover_mmap(&cov);
309311
// If delayed kcov mmap is not supported by the kernel,
310312
// accesses to the second mapping will crash.
@@ -316,9 +318,9 @@ static const char* setup_delay_kcov()
316318
fail("clock_gettime failed");
317319
error = "kernel commit b3d7fe86fbd0 is not present";
318320
} else {
319-
munmap(cov.data - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
321+
munmap(cov.mmap_alloc_ptr, cov.mmap_alloc_size);
320322
}
321-
munmap(first - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
323+
munmap(first, cov.mmap_alloc_size);
322324
close(cov.fd);
323325
return error;
324326
}

executor/executor_test.h

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static intptr_t execute_syscall(const call_t* c, intptr_t a[kMaxArgs])
7575

7676
static void cover_open(cover_t* cov, bool extra)
7777
{
78-
cov->mmap_alloc_size = kCoverSize * sizeof(unsigned long);
78+
cov->data_size = kCoverSize * sizeof(unsigned long);
7979
}
8080

8181
static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
@@ -102,14 +102,16 @@ static void cover_protect(cover_t* cov)
102102

103103
static void cover_mmap(cover_t* cov)
104104
{
105-
if (cov->data != NULL)
105+
if (cov->mmap_alloc_ptr != NULL)
106106
fail("cover_mmap invoked on an already mmapped cover_t object");
107-
if (cov->mmap_alloc_size == 0)
107+
if (cov->data_size == 0)
108108
fail("cover_t structure is corrupted");
109-
cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
110-
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
111-
if (cov->data == MAP_FAILED)
109+
cov->mmap_alloc_size = cov->data_size;
110+
cov->mmap_alloc_ptr = (char*)mmap(NULL, cov->mmap_alloc_size,
111+
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
112+
if (cov->mmap_alloc_ptr == MAP_FAILED)
112113
exitf("cover mmap failed");
114+
cov->data = cov->mmap_alloc_ptr;
113115
cov->data_end = cov->data + cov->mmap_alloc_size;
114116
cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
115117
// We don't care about the specific PC values for now.
@@ -125,9 +127,9 @@ static long inject_cover(cover_t* cov, long a, long b)
125127
{
126128
if (cov->data == nullptr)
127129
return ENOENT;
128-
uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
130+
uint32 size = std::min((uint32)b, cov->data_size);
129131
memcpy(cov->data, (void*)a, size);
130-
memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
132+
memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->data_size - size));
131133
return 0;
132134
}
133135

0 commit comments

Comments
 (0)