58 changes: 52 additions & 6 deletions runtime/gc_base/IndexableObjectAllocationModel.cpp
@@ -27,6 +27,7 @@
#include "MemorySpace.hpp"
#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
#include "AllocationContextBalanced.hpp"
#include "HeapRegionDataForAllocate.hpp"
#include "EnvironmentVLHGC.hpp"
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) || defined(J9VM_GC_ENABLE_DOUBLE_MAP)
@@ -345,12 +346,24 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
MM_GCExtensions *extensions = MM_GCExtensions::getExtensions(envBase);
GC_ArrayObjectModel *indexableObjectModel = &extensions->indexableObjectModel;
const uintptr_t regionSize = extensions->heapRegionManager->getRegionSize();

uintptr_t byteAmount = 0;

/* Determine how many bytes to allocate outside of the spine (in arraylet leaves). */
Assert_MM_true(_allocateDescription.getBytesRequested() >= _allocateDescription.getContiguousBytes());
uintptr_t bytesRemaining = _allocateDescription.getBytesRequested() - _allocateDescription.getContiguousBytes();
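/* Track the allocation context that owns each reserved region so the regions can later be
recycled back to the correct context. A small on-stack array covers up to
REGION_RESERVE_THRESHOLD regions; larger arrays are allocated from the forge. */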
#define REGION_RESERVE_THRESHOLD 64
void *allocationContexts[REGION_RESERVE_THRESHOLD];
void **reservedRegionAllocationContexts = allocationContexts;
uintptr_t reservedRegionCount = bytesRemaining / regionSize;

if (reservedRegionCount > REGION_RESERVE_THRESHOLD) {
reservedRegionAllocationContexts = (void **)envBase->getForge()->allocate(reservedRegionCount * sizeof(void *), MM_AllocationCategory::GC_HEAP, J9_GET_CALLSITE());
}

if (NULL == reservedRegionAllocationContexts) {
/* Failed to allocate the tracking array for reserved region contexts; give up on the allocation. */
return NULL;
}

MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);
MM_AllocationContextBalanced *commonContext = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();
@@ -359,26 +372,46 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
uintptr_t arrayReservedRegionCount = 0;
uintptr_t fraction = 0;
while (0 < bytesRemaining) {
/* Allocate the next arraylet leaf - leaves are allocated solely for the purpose of
/* Allocate the next reserved region - reserved regions are allocated solely for the purpose of
decommitting the memory later on in this function. */
void *reservedAddressLow = NULL;
bool shouldAllocateReservedRegion = true;

if (regionSize > bytesRemaining) {
fraction = bytesRemaining;
/* For code simplicity and lower fragmentation, we always use the Common Context for the fractional remainder. */
shouldAllocateReservedRegion = commonContext->allocateFromSharedArrayReservedRegion(envBase, fraction);
}
if (shouldAllocateReservedRegion) {
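/* Mark whether this leaf draws from the shared reserved fraction so that
lockedAllocateArrayletLeaf() accounts the reserved region to the common context. */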
_allocateDescription.setSharedReserved(0 != fraction);

reservedAddressLow = envBase->_objectAllocationInterface->allocateArrayletLeaf(
envBase, &_allocateDescription, _allocateDescription.getMemorySpace(), true);

_allocateDescription.setSharedReserved(false);

/* If the reserved region allocation failed, clear the spine and break out of the loop. */
if (NULL == reservedAddressLow) {
Trc_MM_allocateAndConnectNonContiguousArraylet_leafFailure(envBase->getLanguageVMThread());
_allocateDescription.setSpine(NULL);
if (0 != fraction) {
commonContext->recycleToSharedArrayReservedRegion(envBase, fraction);
fraction = 0;
}
spine = NULL;
break;
}

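/* For a full region (no fraction), record the allocation context that owns it - preferring the
original owning context if the region was taken from another context - so the region can later
be recycled back to the context it came from. */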
if (0 == fraction) {
MM_HeapRegionDescriptorVLHGC *reservedRegion = (MM_HeapRegionDescriptorVLHGC *)extensions->heapRegionManager->regionDescriptorForAddress(reservedAddressLow);
MM_HeapRegionDataForAllocate *allocateData = &reservedRegion->_allocateData;
if (NULL != allocateData->_originalOwningContext) {
reservedRegionAllocationContexts[arrayReservedRegionCount] = allocateData->_originalOwningContext;
} else {
reservedRegionAllocationContexts[arrayReservedRegionCount] = allocateData->_owningContext;
}
}

/* Disable the region for reads and writes, since the array data will be accessed through the off-heap virtualLargeObjectHeapAddress set as DataAddrForContiguous. */
void *reservedAddressHigh = (void *)((uintptr_t)reservedAddressLow + regionSize);
bool ret = extensions->heap->decommitMemory(reservedAddressLow, regionSize, reservedAddressLow, reservedAddressHigh);
@@ -391,17 +424,22 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
}

bytesRemaining -= OMR_MIN(bytesRemaining, regionSize);
arrayReservedRegionCount += 1;
if (0 == fraction) {
arrayReservedRegionCount += 1;
}
}


if (NULL != spine) {
Assert_MM_true(_layout == GC_ArrayletObjectModel::InlineContiguous);
Assert_MM_true(indexableObjectModel->isVirtualLargeObjectHeapEnabled());

byteAmount = _dataSize;
void *virtualLargeObjectHeapAddress = extensions->largeObjectVirtualMemory->allocateSparseFreeEntryAndMapToHeapObject(spine, byteAmount);

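/* Store each reserved region's owning context alongside the sparse (off-heap) mapping so that
the clearing code can later recycle every region back to its own context. */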
for (uintptr_t idx = 0; idx < arrayReservedRegionCount; idx++) {
extensions->largeObjectVirtualMemory->setAllocationContextForAddress(virtualLargeObjectHeapAddress, reservedRegionAllocationContexts[idx], idx);
}

if (NULL != virtualLargeObjectHeapAddress) {
indexableObjectModel->setDataAddrForContiguous((J9IndexableObject *)spine, virtualLargeObjectHeapAddress);
} else {
@@ -414,13 +452,21 @@ MM_IndexableObjectAllocationModel::getSparseAddressAndDecommitLeaves(MM_Environm
/* Failed to reserve regions or to allocate a sparse free entry; clean up any regions already reserved. */
if (0 != fraction) {
/* Roll back the shared reserved fraction. */
commonContext->recycleToSharedArrayReservedRegion(envBase, fraction);
if (commonContext->recycleToSharedArrayReservedRegion(envBase, fraction)) {
commonContext->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1, true);
}
}
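/* Return each region that was reserved to the allocation context it was taken from. */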
if (0 < arrayReservedRegionCount) {
((MM_HeapRegionManagerVLHGC *)extensions->heapRegionManager)->recycleReservedRegionsForVirtualLargeObjectHeap(envBase, arrayReservedRegionCount);
for (uintptr_t idx = 0; idx < arrayReservedRegionCount; idx++) {
((MM_AllocationContextBalanced *)reservedRegionAllocationContexts[idx])->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1, true);
}
}
}

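/* Release the forge-allocated tracking array if one was needed. */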
if (reservedRegionCount > REGION_RESERVE_THRESHOLD) {
env->getForge()->free((void *)reservedRegionAllocationContexts);
}

Trc_MM_getSparseAddressAndDecommitLeaves_Exit(envBase->getLanguageVMThread(), spine, (void *)bytesRemaining);

return spine;
29 changes: 21 additions & 8 deletions runtime/gc_vlhgc/AllocationContextBalanced.cpp
@@ -391,18 +391,27 @@ MM_AllocationContextBalanced::lockedAllocateArrayletLeaf(MM_EnvironmentBase *env
* In the future, allocations should remember (somewhere in the off-heap meta structures) how many regions came from each AC
* and release exactly the same number back to each AC.
*/
MM_AllocationContextTarok *commonContext = (MM_AllocationContextTarok *)env->getCommonAllocationContext();
if (this != commonContext) {
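/* Determine which context should account for this reserved leaf: its owning context (or the
original owning context if the region was taken from another context), or the common context
when the allocation was satisfied from the shared reserved fraction. */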
MM_AllocationContextTarok *context = leafAllocateData->_owningContext;
if (NULL != leafAllocateData->_originalOwningContext) {
context = leafAllocateData->_originalOwningContext;
}
if (allocateDescription->getSharedReserved()) {
context = (MM_AllocationContextTarok *)env->getCommonAllocationContext();
}

Assert_MM_true(NULL != context);

if (this != context) {
/* Allocation contexts in the balanced GC are always instances of AllocationContextBalanced */
((MM_AllocationContextBalanced *)commonContext)->lockCommon();
((MM_AllocationContextBalanced *)context)->lockCommon();
}

leafAllocateData->pushRegionToArrayReservedRegionList(env, ((MM_AllocationContextBalanced *)commonContext)->getArrayReservedRegionListAddress());
((MM_AllocationContextBalanced *)commonContext)->incrementArrayReservedRegionCount();
leafAllocateData->pushRegionToArrayReservedRegionList(env, ((MM_AllocationContextBalanced *)context)->getArrayReservedRegionListAddress());
((MM_AllocationContextBalanced *)context)->incrementArrayReservedRegionCount();

if (this != commonContext) {
if (this != context) {
/* Allocation contexts in the balanced GC are always instances of AllocationContextBalanced */
((MM_AllocationContextBalanced *)commonContext)->unlockCommon();
((MM_AllocationContextBalanced *)context)->unlockCommon();
}
}
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
@@ -719,6 +728,10 @@ MM_AllocationContextBalanced::acquireFreeRegionFromHeap(MM_EnvironmentBase *env)
} while ((NULL == region) && (firstTheftAttempt != _nextToSteal));
}

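/* Record this context as the owner of the freshly acquired region, so callers no longer
need to set it themselves. */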
if (NULL != region) {
region->_allocateData._owningContext = this;
}

return region;
}

@@ -967,7 +980,7 @@ MM_AllocationContextBalanced::lockedReplenishAndAllocate(MM_EnvironmentBase *env
MM_HeapRegionDescriptorVLHGC *leafRegion = acquireFreeRegionFromHeap(env);
if (NULL != leafRegion) {
result = lockedAllocateArrayletLeaf(env, allocateDescription, leafRegion);
leafRegion->_allocateData._owningContext = this;
/* The owning context is now set inside acquireFreeRegionFromHeap(). */
Assert_MM_true(leafRegion->getLowAddress() == result);
Trc_MM_AllocationContextBalanced_lockedReplenishAndAllocate_acquiredFreeRegion(env->getLanguageVMThread(), regionSize);
}
23 changes: 16 additions & 7 deletions runtime/gc_vlhgc/CopyForwardScheme.cpp
@@ -4174,17 +4174,26 @@ class MM_CopyForwardSchemeRootClearer : public MM_RootScanner
uintptr_t reservedRegionCount = dataSize / regionSize;
uintptr_t fraction = dataSize % regionSize;

MM_AllocationContextBalanced *commonContext = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();
if ((0 != fraction) && commonContext->recycleToSharedArrayReservedRegion(env, fraction)) {
reservedRegionCount += 1;
}

Assert_MM_mustBeClass(_extensions->objectModel.getPreservedClass(&forwardedHeader));
env->_copyForwardStats._offHeapRegionsCleared += 1;
void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)objectPtr);

MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
/* Recycle each reserved region back to the allocation context recorded for it (looked up via the off-heap dataAddr). */
MM_AllocationContextBalanced *context = NULL;
for (uintptr_t index = 0; index < reservedRegionCount; index++) {
context = (MM_AllocationContextBalanced *) largeObjectVirtualMemory->getAllocationContextForAddress(dataAddr, index);
Assert_MM_true(NULL != context);
context->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1);
}

/* Recycle the shared reserved region (fraction). */
context = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();
if ((0 != fraction) && context->recycleToSharedArrayReservedRegion(env, fraction)) {
context->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1);
}

_extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, dataSize, sparseDataEntryIterator);
/* recycleLeafRegions for off-heap case */
commonContext->recycleReservedRegionsForVirtualLargeObjectHeap(env, reservedRegionCount);
} else {
void *dataAddr = _extensions->indexableObjectModel.getDataAddrForContiguous((J9IndexableObject *)fwdOjectPtr);
if (NULL != dataAddr) {
17 changes: 13 additions & 4 deletions runtime/gc_vlhgc/GlobalMarkingScheme.cpp
@@ -1431,14 +1431,23 @@ class MM_GlobalMarkingSchemeRootClearer : public MM_RootScanner

uintptr_t reservedRegionCount = dataSize / regionSize;
uintptr_t fraction = dataSize % regionSize;
MM_AllocationContextBalanced *commonContext = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();

if ((0 != fraction) && commonContext->recycleToSharedArrayReservedRegion(env, fraction)) {
reservedRegionCount += 1;
MM_SparseVirtualMemory *largeObjectVirtualMemory = _extensions->largeObjectVirtualMemory;
/* Recycle each reserved region back to the allocation context recorded for it (looked up via the off-heap dataAddr). */
MM_AllocationContextBalanced *context = NULL;
for (uintptr_t index = 0; index < reservedRegionCount; index++) {
context = (MM_AllocationContextBalanced *) largeObjectVirtualMemory->getAllocationContextForAddress(dataAddr, index);
Assert_MM_true(NULL != context);
context->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1);
}

/* Recycle the shared reserved region (fraction). */
context = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();
if ((0 != fraction) && context->recycleToSharedArrayReservedRegion(env, fraction)) {
context->recycleReservedRegionsForVirtualLargeObjectHeap(env, 1);
}

_extensions->largeObjectVirtualMemory->freeSparseRegionAndUnmapFromHeapObject(_env, dataAddr, objectPtr, dataSize, sparseDataEntryIterator);
commonContext->recycleReservedRegionsForVirtualLargeObjectHeap(env, reservedRegionCount);
}
}
}
11 changes: 0 additions & 11 deletions runtime/gc_vlhgc/HeapRegionManagerVLHGC.cpp
@@ -222,14 +222,3 @@ MM_HeapRegionManagerVLHGC::getHeapMemorySnapshot(MM_GCExtensionsBase *extensions

return snapshot;
}

#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
void
MM_HeapRegionManagerVLHGC::recycleReservedRegionsForVirtualLargeObjectHeap(MM_EnvironmentBase *envBase, uintptr_t reservedRegionCount)
{
MM_EnvironmentVLHGC *env = MM_EnvironmentVLHGC::getEnvironment(envBase);
MM_AllocationContextBalanced *commonContext = (MM_AllocationContextBalanced *)env->getCommonAllocationContext();

commonContext->recycleReservedRegionsForVirtualLargeObjectHeap(env, reservedRegionCount, true);
}
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */
4 changes: 0 additions & 4 deletions runtime/gc_vlhgc/HeapRegionManagerVLHGC.hpp
@@ -42,10 +42,6 @@ class MM_HeapRegionManagerVLHGC : public MM_HeapRegionManagerTarok

virtual MM_HeapMemorySnapshot *getHeapMemorySnapshot(MM_GCExtensionsBase *extensions, MM_HeapMemorySnapshot *snapshot, bool gcEnd);

#if defined(J9VM_GC_SPARSE_HEAP_ALLOCATION)
void recycleReservedRegionsForVirtualLargeObjectHeap(MM_EnvironmentBase *envBase, uintptr_t reservedRegionCount);
#endif /* defined(J9VM_GC_SPARSE_HEAP_ALLOCATION) */

static MM_HeapRegionManagerVLHGC *newInstance(MM_EnvironmentBase *env, uintptr_t regionSize, uintptr_t tableDescriptorSize, MM_RegionDescriptorInitializer regionDescriptorInitializer, MM_RegionDescriptorDestructor regionDescriptorDestructor);
MM_HeapRegionManagerVLHGC(MM_EnvironmentBase *env, uintptr_t regionSize, uintptr_t tableDescriptorSize, MM_RegionDescriptorInitializer regionDescriptorInitializer, MM_RegionDescriptorDestructor regionDescriptorDestructor);
