@@ -81,6 +81,15 @@ bool RecursiveMallocCallProtector::noRecursion() {
 
 #endif // MALLOC_CHECK_RECURSION
 
+/********* Support for handling the special UNUSABLE pointer state **********/
+const intptr_t UNUSABLE = 0x1;
+inline bool isSolidPtr( void* ptr ) {
+    return (UNUSABLE|(intptr_t)ptr)!=UNUSABLE;
+}
+inline bool isNotForUse( void* ptr ) {
+    return (intptr_t)ptr==UNUSABLE;
+}
+
 /*
  * Block::objectSize value used to mark blocks allocated by startupAlloc
  */
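Note: a minimal standalone sketch (mine, not part of the patch) of how these helpers partition the three possible publicFreeList states. A real object pointer is "solid"; both NULL (empty list) and the UNUSABLE marker fail isSolidPtr in a single test, because UNUSABLE|0 and UNUSABLE|1 both equal UNUSABLE.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const intptr_t UNUSABLE = 0x1;
    inline bool isSolidPtr( void* ptr ) { return (UNUSABLE|(intptr_t)ptr)!=UNUSABLE; }
    inline bool isNotForUse( void* ptr ) { return (intptr_t)ptr==UNUSABLE; }

    int main() {
        int object;                           // stands in for a real FreeObject
        assert( isSolidPtr(&object) );        // real list head: usable
        assert( !isSolidPtr(NULL) && !isNotForUse(NULL) );     // empty list
        void* marker = (void*)UNUSABLE;
        assert( !isSolidPtr(marker) && isNotForUse(marker) );  // parked state
        return 0;
    }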
@@ -346,14 +355,14 @@ class LocalBlockFields : public GlobalBlockFields, Padding<blockHeaderAlignment
 class Block : public LocalBlockFields,
               Padding<2*blockHeaderAlignment - sizeof(LocalBlockFields)> {
 public:
-    bool empty() const { return allocatedCount==0 && publicFreeList==NULL; }
+    bool empty() const { return allocatedCount==0 && !isSolidPtr(publicFreeList); }
     inline FreeObject* allocate();
     inline FreeObject *allocateFromFreeList();
     inline bool emptyEnoughToUse();
     bool freeListNonNull() { return freeList; }
     void freePublicObject(FreeObject *objectToFree);
     inline void freeOwnObject(void *object);
-    void makeEmpty();
+    void reset();
     void privatizePublicFreeList( bool cleanup=false );
     void restoreBumpPtr();
     void privatizeOrphaned(TLSData *tls, unsigned index);
@@ -390,7 +399,7 @@ class Block : public LocalBlockFields,
         // expected after double free
         MALLOC_ASSERT(toFree != freeList, msg);
         // check against head of publicFreeList, to detect double free
-        // involiving foreign thread
+        // involving foreign thread
         MALLOC_ASSERT(toFree != publicFreeList, msg);
     }
 #else
@@ -887,16 +896,6 @@ void BootStrapBlocks::reset()
 static MallocMutex publicFreeListLock; // lock for changes of publicFreeList
 #endif
 
-const uintptr_t UNUSABLE = 0x1;
-inline bool isSolidPtr( void* ptr )
-{
-    return (UNUSABLE|(uintptr_t)ptr)!=UNUSABLE;
-}
-inline bool isNotForUse( void* ptr )
-{
-    return (uintptr_t)ptr==UNUSABLE;
-}
-
 /********* End rough utility code **************/
 
 /* LifoList assumes zero initialization so a vector of it can be created
@@ -1034,7 +1033,7 @@ Block *MemoryPool::getEmptyBlock(size_t size)
 
 void MemoryPool::returnEmptyBlock(Block *block, bool poolTheBlock)
 {
-    block->makeEmpty();
+    block->reset();
     if (poolTheBlock) {
         extMemPool.tlsPointerKey.getThreadMallocTLS()->freeSlabBlocks.returnBlock(block);
     }
@@ -1310,7 +1309,7 @@ void Block::freeOwnObject(void *object)
     else
         STAT_increment(getThreadId(), getIndex(objectSize), freeToActiveBlock);
 #endif
-    if (allocatedCount==0 && publicFreeList==NULL) {
+    if (empty()) {
         // The bump pointer is about to be restored for the block,
         // no need to find objectToFree here (this is costly).
@@ -1373,33 +1372,33 @@ void Block::freePublicObject (FreeObject *objectToFree)
 void Block::privatizePublicFreeList( bool cleanup )
 {
     FreeObject *temp, *localPublicFreeList;
+    const intptr_t endMarker = cleanup ? UNUSABLE : 0;
 
     // During cleanup of orphaned blocks, the calling thread is not registered as the owner
     MALLOC_ASSERT( cleanup || isOwnedByCurrentThread(), ASSERT_TEXT );
 #if FREELIST_NONBLOCKING
     temp = publicFreeList;
     do {
         localPublicFreeList = temp;
-        temp = (FreeObject*)AtomicCompareExchange(
-            (intptr_t&)publicFreeList,
-            0, (intptr_t)localPublicFreeList);
+        temp = (FreeObject*)AtomicCompareExchange( (intptr_t&)publicFreeList,
+                                                   endMarker, (intptr_t)localPublicFreeList);
         // no backoff necessary because trying to make change, not waiting for a change
     } while( temp != localPublicFreeList );
 #else
     STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList);
     {
         MallocMutex::scoped_lock scoped_cs(publicFreeListLock);
         localPublicFreeList = publicFreeList;
-        publicFreeList = NULL;
+        (intptr_t&)publicFreeList = endMarker;
     }
     temp = localPublicFreeList;
 #endif
     MALLOC_ITT_SYNC_ACQUIRED(&publicFreeList);
 
-    // there should be something in publicFreeList, unless called by cleanup of orphaned blocks
-    MALLOC_ASSERT( cleanup || localPublicFreeList, ASSERT_TEXT );
+    // publicFreeList must have been UNUSABLE (possible for orphaned blocks) or valid, but not NULL
+    MALLOC_ASSERT( localPublicFreeList!=NULL, ASSERT_TEXT );
     MALLOC_ASSERT( localPublicFreeList==temp, ASSERT_TEXT );
-    if ( isSolidPtr(temp) ) { // return/getPartialBlock could set it to UNUSABLE
+    if ( isSolidPtr(temp) ) {
         MALLOC_ASSERT( allocatedCount <= (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT );
         /* other threads did not change the counter freeing our blocks */
         allocatedCount--;
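For readers following the lock-free path, here is an illustrative model of the loop above (an assumption-laden sketch using std::atomic in place of TBB's AtomicCompareExchange, with simplified types and my own naming, not the library's API). The whole public chain is detached in one atomic step, and endMarker is left behind: 0 for a live owned block, UNUSABLE when cleaning up an orphan, so the block is not later mistaken for one with a usable public list.

    #include <atomic>
    #include <cstdint>

    struct FreeObject { FreeObject* next; };
    const intptr_t UNUSABLE = 0x1;
    std::atomic<intptr_t> publicFreeList{0};

    FreeObject* privatize( bool cleanup ) {
        const intptr_t endMarker = cleanup ? UNUSABLE : 0;
        intptr_t head = publicFreeList.load();
        // Retry if the head changed under us (or on a spurious weak failure);
        // no backoff is needed because each iteration makes progress rather
        // than waiting for another thread.
        while ( !publicFreeList.compare_exchange_weak(head, endMarker) ) {}
        return (FreeObject*)head;   // detached chain; may be 0 or UNUSABLE,
                                    // so callers should apply isSolidPtr()
    }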
@@ -1447,13 +1446,13 @@ void Block::shareOrphaned(intptr_t binTag, unsigned index)
     if ((intptr_t)nextPrivatizable==binTag) {
         void* oldval;
 #if FREELIST_NONBLOCKING
-        oldval = (void*)AtomicCompareExchange((intptr_t&)publicFreeList, (intptr_t)UNUSABLE, 0);
+        oldval = (void*)AtomicCompareExchange((intptr_t&)publicFreeList, UNUSABLE, 0);
 #else
         STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList);
         {
             MallocMutex::scoped_lock scoped_cs(publicFreeListLock);
             if ( (oldval=publicFreeList)==NULL )
-                (uintptr_t&)(publicFreeList) = UNUSABLE;
+                (intptr_t&)(publicFreeList) = UNUSABLE;
         }
 #endif
         if ( oldval!=NULL ) {
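A hedged sketch (same simplified std::atomic model as above, hypothetical helper name) of the transition shareOrphaned performs here: the CAS flips an empty public list (0) to UNUSABLE so that later frees from foreign threads can see the block is orphaned; if another thread has already freed an object to the block, the CAS fails and oldval carries that list head instead.

    // AtomicCompareExchange(location, newValue, comparand) semantics,
    // modeled with std::atomic: store UNUSABLE only if the list is still 0.
    intptr_t markOrphaned( std::atomic<intptr_t>& list ) {
        intptr_t expected = 0;                        // comparand: empty list
        list.compare_exchange_strong( expected, UNUSABLE );
        return expected;                              // oldval: 0 on success
    }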
@@ -1478,7 +1477,7 @@ void Block::shareOrphaned(intptr_t binTag, unsigned index)
     // it is caller responsibility to ensure that the list of blocks
     // formed by nextPrivatizable pointers is kept consistent if required.
     // if only called from thread shutdown code, it does not matter.
-    (uintptr_t&)(nextPrivatizable) = UNUSABLE;
+    (intptr_t&)(nextPrivatizable) = UNUSABLE;
 }
 
 void Block::cleanBlockHeader()
@@ -1549,7 +1548,7 @@ bool OrphanedBlocks::cleanup(Backend* backend)
         Block* next = block->next;
         block->privatizePublicFreeList( /*cleanup=*/true );
         if (block->empty()) {
-            block->makeEmpty();
+            block->reset();
             // slab blocks in user's pools do not have valid backRefIdx
             if (!backend->inUserPool())
                 removeBackRef(*(block->getBackRefIdx()));
@@ -1626,12 +1625,12 @@ bool FreeBlockPool::externalCleanup()
     return nonEmpty;
 }
 
-/* We have a block give it back to the malloc block manager */
-void Block::makeEmpty()
+/* Prepare the block for returning to FreeBlockPool */
+void Block::reset()
 {
     // it is caller's responsibility to ensure no data is lost before calling this
     MALLOC_ASSERT( allocatedCount==0, ASSERT_TEXT );
-    MALLOC_ASSERT( publicFreeList==NULL, ASSERT_TEXT );
+    MALLOC_ASSERT( !isSolidPtr(publicFreeList), ASSERT_TEXT );
     if (!isStartupAllocObject())
         STAT_increment(getThreadId(), getIndex(objectSize), freeBlockBack);