This repository was archived by the owner on Jun 15, 2025. It is now read-only.

Commit fc14a15

Merge pull request #12 from dermojo/allocator-fix

Allocator fix

2 parents 6fc467d + b66de9e

File tree

12 files changed: +201 -103 lines changed

CMakeLists.txt

Lines changed: 5 additions & 1 deletion

@@ -63,7 +63,11 @@ string(TOLOWER "${CMAKE_BUILD_TOOL}" MY_BUILDTOOL)
 if(MY_BUILDTOOL MATCHES "(msdev|devenv|nmake|msbuild)")
     target_compile_options(testlib PUBLIC /W4 /WX)
 else()
-    target_compile_options(testlib PUBLIC -Wall -Weffc++ -Werror)
+    target_compile_options(testlib PUBLIC -Wall -Weffc++ -Werror -Wextra -pedantic
+                                          -Wunused -Wredundant-decls -Wunreachable-code
+                                          -Wold-style-cast -Wshadow
+                                          -Wconversion -Wsign-conversion -Wno-conversion-null
+                                          -Wcast-align)
     # These are enabled on Clang and don't help...
     target_compile_options(testlib PUBLIC -Wno-missing-braces)
     target_link_libraries(testlib pthread)
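Note: most of the cast churn in the diffs below follows from these new flags. -Wold-style-cast rejects C-style casts, and -Wconversion/-Wsign-conversion flag implicit narrowing or sign changes, which is why (T)x becomes static_cast/reinterpret_cast throughout the headers. A minimal illustration (hypothetical snippet, not from this repo):

    #include <cstddef>
    #include <cstdint>

    std::uint32_t narrow(std::size_t len)
    {
        // return (uint32_t)len;               // rejected by -Wold-style-cast (fatal with -Werror)
        // return len;                         // flagged by -Wconversion: may narrow
        return static_cast<std::uint32_t>(len); // explicit intent, compiles cleanly
    }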

include/spsl/hash.hpp

Lines changed: 37 additions & 37 deletions
@@ -76,12 +76,12 @@ inline uint64_t rotl64(uint64_t x, int8_t r)
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here

-FORCE_INLINE uint32_t getblock32(const uint32_t* p, int i)
+FORCE_INLINE uint32_t getblock32(const uint32_t* p, uint32_t i)
 {
     return p[i];
 }

-FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i)
+FORCE_INLINE uint64_t getblock64(const uint64_t* p, std::size_t i)
 {
     return p[i];
 }
@@ -115,10 +115,10 @@ FORCE_INLINE uint64_t fmix64(uint64_t k)

 //-----------------------------------------------------------------------------

-inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out)
+inline void MurmurHash3_x86_32(const void* key, const uint32_t len, uint32_t seed, void* out)
 {
-    const uint8_t* data = (const uint8_t*)key;
-    const int nblocks = len / 4;
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(key);
+    const uint32_t nblocks = len / 4;

     uint32_t h1 = seed;

@@ -128,9 +128,9 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou
     //----------
     // body

-    const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);
+    const uint32_t* blocks = reinterpret_cast<const uint32_t*>(data);

-    for (int i = -nblocks; i; i++)
+    for (uint32_t i = 0; i < nblocks; ++i)
     {
         uint32_t k1 = getblock32(blocks, i);
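Note: this loop rewrite is behavior-preserving. Upstream MurmurHash3 points blocks one past the last full block (data + nblocks * 4) and counts i from -nblocks to -1; the new version points blocks at the start of the buffer and counts i from 0 to nblocks - 1, so both read the same words in the same order while keeping the index unsigned (which -Wsign-conversion prefers). A small hypothetical check of the equivalence:

    #include <cassert>
    #include <cstdint>

    void same_blocks(const std::uint8_t* data, std::uint32_t nblocks)
    {
        const auto* end = reinterpret_cast<const std::uint32_t*>(data + nblocks * 4);
        const auto* begin = reinterpret_cast<const std::uint32_t*>(data);
        for (std::uint32_t i = 0; i < nblocks; ++i)
        {
            // old indexing: end[i - nblocks]; new indexing: begin[i]
            assert(end[static_cast<std::int64_t>(i) - nblocks] == begin[i]);
        }
    }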

@@ -146,17 +146,17 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou
     //----------
     // tail

-    const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
+    const uint8_t* tail = (data + nblocks * 4);

     uint32_t k1 = 0;

     switch (len & 3)
     {
     case 3:
-        k1 ^= tail[2] << 16;
+        k1 ^= static_cast<uint32_t>(tail[2] << 16);
         /* no break */
     case 2:
-        k1 ^= tail[1] << 8;
+        k1 ^= static_cast<uint32_t>(tail[1] << 8);
         /* no break */
     case 1:
         k1 ^= tail[0];
@@ -173,15 +173,16 @@ inline void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* ou

     h1 = fmix32(h1);

-    *(uint32_t*)out = h1;
+    *static_cast<uint32_t*>(out) = h1;
 }

 //-----------------------------------------------------------------------------

-inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, void* out)
+inline void MurmurHash3_x64_128(const void* key, const std::size_t len, const uint32_t seed,
+                                void* out)
 {
-    const uint8_t* data = (const uint8_t*)key;
-    const int nblocks = len / 16;
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(key);
+    const std::size_t nblocks = len / 16;

     uint64_t h1 = seed;
     uint64_t h2 = seed;
@@ -192,9 +193,9 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     //----------
     // body

-    const uint64_t* blocks = (const uint64_t*)(data);
+    const uint64_t* blocks = reinterpret_cast<const uint64_t*>(data);

-    for (int i = 0; i < nblocks; i++)
+    for (std::size_t i = 0; i < nblocks; i++)
     {
         uint64_t k1 = getblock64(blocks, i * 2 + 0);
         uint64_t k2 = getblock64(blocks, i * 2 + 1);
@@ -221,62 +222,62 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     //----------
     // tail

-    const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);
+    const uint8_t* tail = (data + nblocks * 16);

     uint64_t k1 = 0;
     uint64_t k2 = 0;

     switch (len & 15)
     {
     case 15:
-        k2 ^= ((uint64_t)tail[14]) << 48;
+        k2 ^= (static_cast<uint64_t>(tail[14])) << 48;
         /* no break */
     case 14:
-        k2 ^= ((uint64_t)tail[13]) << 40;
+        k2 ^= (static_cast<uint64_t>(tail[13])) << 40;
         /* no break */
     case 13:
-        k2 ^= ((uint64_t)tail[12]) << 32;
+        k2 ^= (static_cast<uint64_t>(tail[12])) << 32;
         /* no break */
     case 12:
-        k2 ^= ((uint64_t)tail[11]) << 24;
+        k2 ^= (static_cast<uint64_t>(tail[11])) << 24;
         /* no break */
     case 11:
-        k2 ^= ((uint64_t)tail[10]) << 16;
+        k2 ^= (static_cast<uint64_t>(tail[10])) << 16;
         /* no break */
     case 10:
-        k2 ^= ((uint64_t)tail[9]) << 8;
+        k2 ^= (static_cast<uint64_t>(tail[9])) << 8;
         /* no break */
     case 9:
-        k2 ^= ((uint64_t)tail[8]) << 0;
+        k2 ^= (static_cast<uint64_t>(tail[8])) << 0;
         k2 *= c2;
         k2 = ROTL64(k2, 33);
         k2 *= c1;
         h2 ^= k2;
         /* no break */

     case 8:
-        k1 ^= ((uint64_t)tail[7]) << 56;
+        k1 ^= (static_cast<uint64_t>(tail[7])) << 56;
         /* no break */
     case 7:
-        k1 ^= ((uint64_t)tail[6]) << 48;
+        k1 ^= (static_cast<uint64_t>(tail[6])) << 48;
         /* no break */
     case 6:
-        k1 ^= ((uint64_t)tail[5]) << 40;
+        k1 ^= (static_cast<uint64_t>(tail[5])) << 40;
         /* no break */
     case 5:
-        k1 ^= ((uint64_t)tail[4]) << 32;
+        k1 ^= (static_cast<uint64_t>(tail[4])) << 32;
         /* no break */
     case 4:
-        k1 ^= ((uint64_t)tail[3]) << 24;
+        k1 ^= (static_cast<uint64_t>(tail[3])) << 24;
         /* no break */
     case 3:
-        k1 ^= ((uint64_t)tail[2]) << 16;
+        k1 ^= (static_cast<uint64_t>(tail[2])) << 16;
         /* no break */
     case 2:
-        k1 ^= ((uint64_t)tail[1]) << 8;
+        k1 ^= (static_cast<uint64_t>(tail[1])) << 8;
         /* no break */
     case 1:
-        k1 ^= ((uint64_t)tail[0]) << 0;
+        k1 ^= (static_cast<uint64_t>(tail[0])) << 0;
         k1 *= c1;
         k1 = ROTL64(k1, 31);
         k1 *= c2;
@@ -298,8 +299,8 @@ inline void MurmurHash3_x64_128(const void* key, const int len, const uint32_t s
     h1 += h2;
     h2 += h1;

-    ((uint64_t*)out)[0] = h1;
-    ((uint64_t*)out)[1] = h2;
+    static_cast<uint64_t*>(out)[0] = h1;
+    static_cast<uint64_t*>(out)[1] = h2;
 }

 //-----------------------------------------------------------------------------
@@ -320,7 +321,7 @@ template <>
 inline size_t hash_impl<32>(const void* buffer, size_t len)
 {
     uint32_t result = 0;
-    murmurhash3::MurmurHash3_x86_32(buffer, len, 0 /* seed */, &result);
+    murmurhash3::MurmurHash3_x86_32(buffer, static_cast<uint32_t>(len), 0 /* seed */, &result);
     return result;
 }

@@ -337,10 +338,9 @@ inline size_t hash_impl<64>(const void* buffer, size_t len)
     murmurhash3::MurmurHash3_x64_128(buffer, len, 0 /* seed */, result128);

     // this is the only "defined" way to convert properly...
-    // TODO: Why does MSVC complain about size_t != uint64_t?!
     size_t result = 0;
     memcpy(&result, result128, sizeof(result));
-    return result;
+    return result;
 }

 /**
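Note: the surviving memcpy in hash_impl<64> is kept because copying bytes is the well-defined way to reinterpret an object in C++; dereferencing a casted pointer such as *reinterpret_cast<size_t*>(result128) would violate strict aliasing. On a 32-bit size_t this simply keeps the low bytes of the 128-bit digest. The idiom in isolation (a sketch, not the header's exact code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    std::size_t truncate_digest(const std::uint64_t (&digest128)[2])
    {
        std::size_t result = 0;
        // well-defined type punning: copy the first sizeof(result) bytes
        std::memcpy(&result, digest128, sizeof(result));
        return result;
    }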

include/spsl/pagealloc.hpp

Lines changed: 43 additions & 17 deletions
@@ -120,7 +120,7 @@ class SensitivePageAllocator
         // check for memory that is still in use
         bool firstCbCall = true;

-        // check each chunk and tell the callbcak
+        // check each chunk and tell the callback
         if (m_leakCallback)
         {
             for (auto& chunk : m_managedChunks)
@@ -160,10 +160,11 @@ class SensitivePageAllocator
                     chunk.segments |= resetMask;

                     // notify the callback
-                    m_leakCallback(
-                        this, AllocationInfo{ (char*)chunk.addr + startIndex * segment_size,
-                                              (endIndex - startIndex + 1) * segment_size },
-                        firstCbCall);
+                    m_leakCallback(this,
+                                   AllocationInfo{ reinterpret_cast<char*>(chunk.addr) +
+                                                       startIndex * segment_size,
+                                                   (endIndex - startIndex + 1) * segment_size },
+                                   firstCbCall);
                     firstCbCall = false;
                 }
             }
@@ -198,7 +199,7 @@ class SensitivePageAllocator
     /**
      * Returns the "default instance", a.k.a. a static instance of the allocator. The instance is
      * created upon first use and destroyed when the application exits.
-     * Note: This functio may throw any exception that the constructor might throw.
+     * Note: This function may throw any exception that the constructor might throw.
      * @return a reference to the instance
      */
     static SensitivePageAllocator& getDefaultInstance()
@@ -210,7 +211,7 @@ class SensitivePageAllocator

     /**
      * Sets the leak callback function. This function is called by the destructor for every memory
-     * location that hasn't been deallcated yet.
+     * location that hasn't been deallocated yet.
      *
      * Note: This method is intentionally *not* thread-safe.
      * Another note: Pass @c nullptr to disable leak checks.
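Note: a hypothetical use of the leak callback described above. The parameter list is inferred from defaultLeakCallback in the next hunk (allocator instance, the leaked AllocationInfo, and a first-call flag), and this assumes the setter below the doc comment is named setLeakCallback and that LeakCallbackFunction accepts a lambda:

    #include <iostream>
    #include "spsl/pagealloc.hpp"

    void install(spsl::SensitivePageAllocator& alloc)
    {
        alloc.setLeakCallback([](const spsl::SensitivePageAllocator* instance,
                                 const spsl::SensitivePageAllocator::AllocationInfo& leak,
                                 bool first) {
            if (first)
                std::cerr << "leaks in allocator " << instance << ":\n";
            std::cerr << leak.size << " bytes @ " << leak.addr << '\n';
        });
        // per the doc comment, passing nullptr would disable leak checks again
    }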
@@ -230,7 +231,8 @@ class SensitivePageAllocator
                                         bool first)
     {
         if (first)
-            std::cerr << "!!! Leaks detected in PageAllocator(" << (const void*)instance << "):\n";
+            std::cerr << "!!! Leaks detected in PageAllocator("
+                      << reinterpret_cast<const void*>(instance) << "):\n";
         std::cerr << "!!! " << leak.size << " bytes @ address " << leak.addr << '\n';
     }

@@ -258,7 +260,7 @@ class SensitivePageAllocator

     static inline constexpr uint64_t getBitmask(std::size_t n)
     {
-        return (n == segmentsPerChunk ? all64 : (((uint64_t)1 << n) - 1));
+        return (n == segmentsPerChunk ? all64 : ((static_cast<uint64_t>(1) << n) - 1));
     }
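Note on the getBitmask() branch: with 64 segments per chunk, the n == segmentsPerChunk case cannot be folded into the shift, because shifting a uint64_t by 64 is undefined behavior. A small stand-alone check of the intended mask values (hypothetical, mirrors the diff):

    #include <cstddef>
    #include <cstdint>

    constexpr std::uint64_t all64 = ~static_cast<std::uint64_t>(0);

    constexpr std::uint64_t getBitmask(std::size_t n) // assumes 64 segments per chunk
    {
        return n == 64 ? all64 : ((static_cast<std::uint64_t>(1) << n) - 1);
    }

    static_assert(getBitmask(1) == 0x1, "lowest bit only");
    static_assert(getBitmask(3) == 0x7, "three low bits");
    static_assert(getBitmask(64) == all64, "a shift by 64 would be UB, hence the branch");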

@@ -383,7 +385,7 @@ class SensitivePageAllocator
                 {
                     // found! -> mark as reserved
                     chunk.segments &= ~mask;
-                    return (char*)chunk.addr + index * segment_size;
+                    return reinterpret_cast<char*>(chunk.addr) + index * segment_size;
                 }
                 // shift the mask
                 mask <<= 1;
@@ -400,7 +402,7 @@ class SensitivePageAllocator
         for (std::size_t i = 0; i < m_chunksPerPage; ++i)
         {
             m_managedChunks.push_back(
-                ChunkManagementInfo{ (char*)addr + i * chunk_size, addr, all64 });
+                ChunkManagementInfo{ reinterpret_cast<char*>(addr) + i * chunk_size, addr, all64 });
         }
         ChunkManagementInfo& chunk = m_managedChunks[m_managedChunks.size() - m_chunksPerPage];

@@ -427,10 +429,12 @@ class SensitivePageAllocator
         pointer pageAddr = nullptr;
         for (auto& chunk : m_managedChunks)
         {
-            if (addr >= chunk.addr && addr < (char*)chunk.addr + chunk_size)
+            if (addr >= chunk.addr && addr < reinterpret_cast<char*>(chunk.addr) + chunk_size)
             {
                 // found: calculate the index and the bit mask
-                std::size_t index = ((char*)addr - (char*)chunk.addr) / segment_size;
+                std::size_t index = static_cast<std::size_t>(reinterpret_cast<char*>(addr) -
+                                                             reinterpret_cast<char*>(chunk.addr)) /
+                                    segment_size;
                 uint64_t mask = bitmask << index;
                 chunk.segments |= mask;

@@ -526,7 +530,7 @@ class SensitivePageAllocator
     std::vector<AllocationInfo> m_unmanagedAreas;

     /// This function is called by the destructor for every memory location that hasn't been
-    /// deallocted yet. The default implementation prints using std::cerr.
+    /// deallocated yet. The default implementation prints using std::cerr.
     LeakCallbackFunction m_leakCallback;
 };

@@ -545,17 +549,39 @@ class SensitiveSegmentAllocator

     // constructors
     SensitiveSegmentAllocator() : m_alloc(&SensitivePageAllocator::getDefaultInstance()) {}
-    explicit SensitiveSegmentAllocator(SensitivePageAllocator& alloc) noexcept : m_alloc(&alloc) {}
-    explicit SensitiveSegmentAllocator(const SensitiveSegmentAllocator& other) noexcept = default;
+    SensitiveSegmentAllocator(SensitivePageAllocator& alloc) noexcept : m_alloc(&alloc) {}
+    SensitiveSegmentAllocator(const SensitiveSegmentAllocator& other) noexcept = default;
+    SensitiveSegmentAllocator(SensitiveSegmentAllocator&& other) noexcept
+        : SensitiveSegmentAllocator()
+    {
+        this->swap(other);
+    }
     template <class U>
     explicit SensitiveSegmentAllocator(const SensitiveSegmentAllocator<U>& other) noexcept
         : m_alloc(other.m_alloc)
     {
     }

+    SensitiveSegmentAllocator& operator=(const SensitiveSegmentAllocator& other) noexcept = default;
+    SensitiveSegmentAllocator& operator=(SensitiveSegmentAllocator&& other) noexcept
+    {
+        this->swap(other);
+        return *this;
+    }
+
     // default destructor
     ~SensitiveSegmentAllocator() = default;

+    /**
+     * Swaps this allocator with another one by swapping the referenced page allocator.
+     * @param[in] other the allocator to swap with
+     */
+    void swap(SensitiveSegmentAllocator& other) noexcept { std::swap(m_alloc, other.m_alloc); }
+
+    SensitivePageAllocator* pageAllocator() const noexcept { return m_alloc; }
+
+    // allocation / deallocation
+
     T* allocate(std::size_t n) { return static_cast<T*>(m_alloc->allocate(n * sizeof(T))); }

     void deallocate(T* p, std::size_t n) { m_alloc->deallocate(p, n * sizeof(T)); }
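Note: the new move operations use the move-and-swap idiom. The move constructor delegates to the default constructor first, so a moved-from allocator ends up pointing at the default page allocator rather than nullptr and remains safe to use. Hypothetical usage, assuming the types from this header:

    #include <cassert>
    #include <utility>
    #include "spsl/pagealloc.hpp"

    void demo()
    {
        spsl::SensitivePageAllocator pages;
        spsl::SensitiveSegmentAllocator<char> a(pages);

        // b swaps with a: b now uses 'pages', a falls back to the default instance
        spsl::SensitiveSegmentAllocator<char> b(std::move(a));
        assert(b.pageAllocator() == &pages);
        assert(a.pageAllocator() == &spsl::SensitivePageAllocator::getDefaultInstance());
    }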
@@ -566,7 +592,7 @@ class SensitiveSegmentAllocator
     /// non-owning pointer to the "real" allocator (never nullptr, but cannot use a reference...)
     SensitivePageAllocator* m_alloc;
 };
-}
+} // namespace spsl


 #endif /* SPSL_PAGEALLOC_HPP_ */

include/spsl/policies.hpp

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ struct Throw
     {
         if (cap > max)
             throw std::length_error("requested capacity exceeds maximum");
-    };
+    }
     template <typename char_type, typename size_type>
     static size_type checkAssign(const char_type*, size_type n, size_type max)
    {
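Note: the dropped semicolon is the entire fix here. The stray ';' after the member function body is a redundant empty declaration, which the stricter warning set enabled in this PR turns fatal under -Werror on some compilers (e.g. via -Wextra-semi in Clang and newer GCC):

    struct Example
    {
        void f() {};  // stray ';' after the body: stricter warning sets flag this
        void g() {}   // fine
    };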

include/spsl/storage_array.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,8 @@ class StorageArray
     }
     void push_back(char_type c)
     {
-        const size_type n = overflow_policy::checkAppend((size_type)1, c, size(), max_size());
+        const size_type n =
+            overflow_policy::checkAppend(static_cast<size_type>(1), c, size(), max_size());
         if (n)
         {
             m_buffer[m_length++] = c;
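Note: checkAppend() here comes from the overflow policy (see policies.hpp above). It returns how many characters may be appended, throwing or truncating on overflow depending on the policy. A minimal sketch of that contract (simplified names, not the actual spsl code):

    #include <cstddef>
    #include <stdexcept>

    struct ThrowPolicy
    {
        // returns n if the append fits, otherwise throws,
        // mirroring the Throw policy's checkReserve() above
        static std::size_t checkAppend(std::size_t n, char /*c*/, std::size_t size,
                                       std::size_t max)
        {
            if (size + n > max)
                throw std::length_error("requested capacity exceeds maximum");
            return n;
        }
    };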
