Merge pull request eclipse-openj9#16820 from dmitripivkine/master
Replace J9VM_MODRON_SCAVENGER_CACHE_* to OMR_COPYSCAN_CACHE_*
amicic committed Apr 24, 2023
2 parents 78a79a0 + f1ecdfb commit 8c3b2e2
Showing 7 changed files with 222 additions and 240 deletions.
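In effect the commit is a mechanical rename: every use of the J9VM_MODRON_SCAVENGER_CACHE_* flag constants in these files now spells the equivalent OMR_COPYSCAN_CACHE_* name, and the touched declarations also move from the J9 UDATA/U_64 typedefs to uintptr_t/uint64_t. The pattern, taken from the CopyScanCacheChunkVLHGCInHeap.cpp hunk below:

    structureArrayBase[i].flags |= J9VM_MODRON_SCAVENGER_CACHE_TYPE_HEAP;  /* before */
    structureArrayBase[i].flags |= OMR_COPYSCAN_CACHE_TYPE_HEAP;           /* after */

The DDR superset entries for the old constant names are simply dropped (18 deletions, no replacements added in superset-constants.dat).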
18 changes: 0 additions & 18 deletions debugtools/DDR_VM/data/superset-constants.dat
@@ -4406,24 +4406,6 @@ C|SCAN_REASON_DIRTY_CARD
C|SCAN_REASON_OVERFLOWED_REGION
C|SCAN_REASON_PACKET
S|MM_CopyScanCache|MM_CopyScanCachePointer|MM_Base
C|J9VM_MODRON_SCAVENGER_CACHE_MASK_PERSISTENT
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_CLEARED
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_COPY
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_HEAP
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_LOA
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_SCAN
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_SEMISPACE
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY
C|J9VM_MODRON_SCAVENGER_CACHE_TYPE_TENURESPACE
C|OMR_SCAVENGER_CACHE_MASK_PERSISTENT
C|OMR_SCAVENGER_CACHE_TYPE_CLEARED
C|OMR_SCAVENGER_CACHE_TYPE_COPY
C|OMR_SCAVENGER_CACHE_TYPE_HEAP
C|OMR_SCAVENGER_CACHE_TYPE_LOA
C|OMR_SCAVENGER_CACHE_TYPE_SCAN
C|OMR_SCAVENGER_CACHE_TYPE_SEMISPACE
C|OMR_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY
C|OMR_SCAVENGER_CACHE_TYPE_TENURESPACE
S|MM_CycleState$CollectionType|MM_CycleState$CollectionTypePointer|
C|CT_GLOBAL_GARBAGE_COLLECTION
C|CT_GLOBAL_MARK_PHASE
336 changes: 168 additions & 168 deletions runtime/gc_vlhgc/CopyForwardScheme.cpp

Large diffs are not rendered by default.

22 changes: 11 additions & 11 deletions runtime/gc_vlhgc/CopyScanCacheChunkVLHGCInHeap.cpp
@@ -28,12 +28,12 @@
#include "CopyScanCacheChunkVLHGCInHeap.hpp"


UDATA
uintptr_t
MM_CopyScanCacheChunkVLHGCInHeap::numberOfCachesInChunk(MM_EnvironmentVLHGC *env)
{
UDATA tlhMinimumSize = MM_GCExtensions::getExtensions(env)->tlhMinimumSize;
UDATA sizeToAllocate = sizeof(MM_CopyScanCacheChunkVLHGCInHeap);
UDATA numberOfCaches = 1;
uintptr_t tlhMinimumSize = MM_GCExtensions::getExtensions(env)->tlhMinimumSize;
uintptr_t sizeToAllocate = sizeof(MM_CopyScanCacheChunkVLHGCInHeap);
uintptr_t numberOfCaches = 1;

if (sizeToAllocate < tlhMinimumSize) {
/* calculate number of caches to just barely exceed tlhMinimumSize */
@@ -42,11 +42,11 @@ MM_CopyScanCacheChunkVLHGCInHeap::numberOfCachesInChunk(MM_EnvironmentVLHGC *env
return numberOfCaches;
}

UDATA
uintptr_t
MM_CopyScanCacheChunkVLHGCInHeap::bytesRequiredToAllocateChunkInHeap(MM_EnvironmentVLHGC *env)
{
UDATA sizeToAllocate = sizeof(MM_CopyScanCacheChunkVLHGCInHeap);
UDATA numberOfCaches = numberOfCachesInChunk(env);
uintptr_t sizeToAllocate = sizeof(MM_CopyScanCacheChunkVLHGCInHeap);
uintptr_t numberOfCaches = numberOfCachesInChunk(env);

/* total size required to allocate */
sizeToAllocate += numberOfCaches * sizeof(MM_CopyScanCacheVLHGC);
@@ -56,7 +56,7 @@ MM_CopyScanCacheChunkVLHGCInHeap::bytesRequiredToAllocateChunkInHeap(MM_Environm
}

MM_CopyScanCacheChunkVLHGCInHeap *
MM_CopyScanCacheChunkVLHGCInHeap::newInstance(MM_EnvironmentVLHGC *env, void *buffer, UDATA bufferLengthInBytes, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk)
MM_CopyScanCacheChunkVLHGCInHeap::newInstance(MM_EnvironmentVLHGC *env, void *buffer, uintptr_t bufferLengthInBytes, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk)
{
/* make sure that the memory extent is exactly the right size to allocate an instance of the receiver */
Assert_MM_true(bytesRequiredToAllocateChunkInHeap(env) == bufferLengthInBytes);
@@ -70,13 +70,13 @@ MM_CopyScanCacheChunkVLHGCInHeap::newInstance(MM_EnvironmentVLHGC *env, void *bu
}

bool
MM_CopyScanCacheChunkVLHGCInHeap::initialize(MM_EnvironmentVLHGC *env, UDATA cacheEntryCount, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk)
MM_CopyScanCacheChunkVLHGCInHeap::initialize(MM_EnvironmentVLHGC *env, uintptr_t cacheEntryCount, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk)
{
bool success = MM_CopyScanCacheChunkVLHGC::initialize(env, cacheEntryCount, nextCacheAddr, nextChunk);
if (success) {
MM_CopyScanCacheVLHGC *structureArrayBase = getBase();
for (UDATA i = 0; i < cacheEntryCount; i++) {
structureArrayBase[i].flags |= J9VM_MODRON_SCAVENGER_CACHE_TYPE_HEAP;
for (uintptr_t i = 0; i < cacheEntryCount; i++) {
structureArrayBase[i].flags |= OMR_COPYSCAN_CACHE_TYPE_HEAP;
}
}
return success;
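The sizing logic in this file works out to: a chunk is at least the chunk header, and if that alone is smaller than tlhMinimumSize, enough MM_CopyScanCacheVLHGC entries are added to just reach it; bytesRequiredToAllocateChunkInHeap() then reports header plus caches. A standalone sketch of that arithmetic — the rounding inside the collapsed if body and the byte sizes are assumptions, not code from the commit:

    #include <cstdint>
    #include <cstdio>

    /* Stand-in sizes; the real values come from sizeof(MM_CopyScanCacheChunkVLHGCInHeap)
     * and sizeof(MM_CopyScanCacheVLHGC). */
    const uintptr_t chunkHeaderSize = 96;
    const uintptr_t cacheSize = 160;

    /* Assumed rounding: just enough caches for header + caches to reach tlhMinimumSize. */
    uintptr_t numberOfCachesInChunk(uintptr_t tlhMinimumSize)
    {
        uintptr_t numberOfCaches = 1;
        if (chunkHeaderSize < tlhMinimumSize) {
            numberOfCaches = (tlhMinimumSize - chunkHeaderSize + cacheSize - 1) / cacheSize;
        }
        return numberOfCaches;
    }

    uintptr_t bytesRequiredToAllocateChunkInHeap(uintptr_t tlhMinimumSize)
    {
        /* header plus one MM_CopyScanCacheVLHGC per cache, as in the hunk above */
        return chunkHeaderSize + (numberOfCachesInChunk(tlhMinimumSize) * cacheSize);
    }

    int main()
    {
        printf("%lu bytes\n", (unsigned long)bytesRequiredToAllocateChunkInHeap(2048));
        return 0;
    }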
8 changes: 4 additions & 4 deletions runtime/gc_vlhgc/CopyScanCacheChunkVLHGCInHeap.hpp
@@ -46,15 +46,15 @@ class MM_CopyScanCacheChunkVLHGCInHeap : public MM_CopyScanCacheChunkVLHGC
* @param env[in] A GC thread
* @return The number of caches which will be allocated as part of an instance of the receiver
*/
static UDATA numberOfCachesInChunk(MM_EnvironmentVLHGC *env);
static uintptr_t numberOfCachesInChunk(MM_EnvironmentVLHGC *env);
/**
* The number of bytes required to allocate an instance of the receiver (since they are all currently the same size).
* @param env[in] A GC thread
* @return The size, in bytes, of the memory extent required to hold one instance of the receiver
*/
static UDATA bytesRequiredToAllocateChunkInHeap(MM_EnvironmentVLHGC *env);
static MM_CopyScanCacheChunkVLHGCInHeap *newInstance(MM_EnvironmentVLHGC *env, void *buffer, UDATA bufferLengthInBytes, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk);
bool initialize(MM_EnvironmentVLHGC *env, UDATA cacheEntryCount, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk);
static uintptr_t bytesRequiredToAllocateChunkInHeap(MM_EnvironmentVLHGC *env);
static MM_CopyScanCacheChunkVLHGCInHeap *newInstance(MM_EnvironmentVLHGC *env, void *buffer, uintptr_t bufferLengthInBytes, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk);
bool initialize(MM_EnvironmentVLHGC *env, uintptr_t cacheEntryCount, MM_CopyScanCacheVLHGC **nextCacheAddr, MM_CopyScanCacheChunkVLHGC *nextChunk);
virtual void kill(MM_EnvironmentVLHGC *env);

/**
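Together with the .cpp above, this header implies a strict contract for in-heap allocation: the caller sizes the extent with bytesRequiredToAllocateChunkInHeap(), carves exactly that many bytes out of the heap, and hands the buffer to newInstance(), which asserts the length matches before constructing the chunk in place. A toy model of that contract (the placement-new construction and the fixed size are assumptions, not the real implementation):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>
    #include <new>

    struct ToyChunk {
        /* stand-in for bytesRequiredToAllocateChunkInHeap() */
        static uintptr_t bytesRequired() { return 256; }

        /* mirrors the Assert_MM_true(bytesRequired == bufferLengthInBytes) check above */
        static ToyChunk *newInstance(void *buffer, uintptr_t bufferLengthInBytes)
        {
            assert(bytesRequired() == bufferLengthInBytes);
            return new (buffer) ToyChunk(); /* construct directly in the supplied extent */
        }
    };

    int main()
    {
        uintptr_t length = ToyChunk::bytesRequired();
        void *buffer = malloc(length); /* stands in for carving space out of a heap region */
        ToyChunk *chunk = ToyChunk::newInstance(buffer, length);
        (void)chunk;
        free(buffer);
        return 0;
    }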
48 changes: 24 additions & 24 deletions runtime/gc_vlhgc/CopyScanCacheListVLHGC.cpp
@@ -50,15 +50,15 @@ MM_CopyScanCacheListVLHGC::initialize(MM_EnvironmentVLHGC *env)
_sublistCount = extensions->packetListSplit;
Assert_MM_true(0 < _sublistCount);

UDATA sublistBytes = sizeof(CopyScanCacheSublist) * _sublistCount;
uintptr_t sublistBytes = sizeof(CopyScanCacheSublist) * _sublistCount;
_sublists = (CopyScanCacheSublist *)extensions->getForge()->allocate(sublistBytes, MM_AllocationCategory::FIXED, J9_GET_CALLSITE());
if (NULL == _sublists) {
_sublistCount = 0;
return false;
}

memset((void *)_sublists, 0, sublistBytes);
for (UDATA i = 0; i < _sublistCount; i++) {
for (uintptr_t i = 0; i < _sublistCount; i++) {
if (!_sublists[i]._cacheLock.initialize(env, &extensions->lnrlOptions, "MM_CopyScanCacheListVLHGC:_sublists[]._cacheLock")) {
return false;
}
@@ -78,7 +78,7 @@ MM_CopyScanCacheListVLHGC::tearDown(MM_EnvironmentVLHGC *env)
}

if (NULL != _sublists) {
for (UDATA i = 0; i < _sublistCount; i++) {
for (uintptr_t i = 0; i < _sublistCount; i++) {
_sublists[i]._cacheLock.tearDown();
}
env->getForge()->free(_sublists);
@@ -88,11 +88,11 @@ MM_CopyScanCacheListVLHGC::tearDown(MM_EnvironmentVLHGC *env)
}

bool
MM_CopyScanCacheListVLHGC::appendCacheEntries(MM_EnvironmentVLHGC *env, UDATA cacheEntryCount)
MM_CopyScanCacheListVLHGC::appendCacheEntries(MM_EnvironmentVLHGC *env, uintptr_t cacheEntryCount)
{
CopyScanCacheSublist *cacheList = &_sublists[getSublistIndex(env)];
MM_CopyScanCacheChunkVLHGC *chunk = MM_CopyScanCacheChunkVLHGC::newInstance(env, cacheEntryCount, &cacheList->_cacheHead, _chunkHead);
if(NULL != chunk) {
if (NULL != chunk) {
_chunkHead = chunk;
_totalEntryCount += cacheEntryCount;
}
@@ -101,7 +101,7 @@ MM_CopyScanCacheListVLHGC::appendCacheEntries(MM_EnvironmentVLHGC *env, UDATA ca
}

bool
MM_CopyScanCacheListVLHGC::resizeCacheEntries(MM_EnvironmentVLHGC *env, UDATA totalCacheEntryCount)
MM_CopyScanCacheListVLHGC::resizeCacheEntries(MM_EnvironmentVLHGC *env, uintptr_t totalCacheEntryCount)
{
MM_GCExtensions *ext = MM_GCExtensions::getExtensions(env);

@@ -136,14 +136,14 @@ MM_CopyScanCacheListVLHGC::removeAllHeapAllocatedChunks(MM_EnvironmentVLHGC *env
/*
* Walk caches list first to remove all references to heap allocated caches
*/
for (UDATA i = 0; i < _sublistCount; i++) {
for (uintptr_t i = 0; i < _sublistCount; i++) {
CopyScanCacheSublist *cacheList = &_sublists[i];

MM_CopyScanCacheVLHGC *previousCache = NULL;
MM_CopyScanCacheVLHGC *cache = cacheList->_cacheHead;

while(cache != NULL) {
if (0 != (cache->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_HEAP)) {
while (cache != NULL) {
if (0 != (cache->flags & OMR_COPYSCAN_CACHE_TYPE_HEAP)) {
/* this cache is heap allocated - remove it from list */
if (NULL == previousCache) {
/* remove first element */
@@ -166,10 +166,10 @@ MM_CopyScanCacheListVLHGC::removeAllHeapAllocatedChunks(MM_EnvironmentVLHGC *env
MM_CopyScanCacheChunkVLHGC *previousChunk = NULL;
MM_CopyScanCacheChunkVLHGC *chunk = _chunkHead;

while(chunk != NULL) {
while (chunk != NULL) {
MM_CopyScanCacheChunkVLHGC *nextChunk = chunk->getNext();

if (0 != (chunk->getBase()->flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_HEAP)) {
if (0 != (chunk->getBase()->flags & OMR_COPYSCAN_CACHE_TYPE_HEAP)) {
/* this chunk is heap allocated - remove it from list */
if (NULL == previousChunk) {
/* still be a first element */
@@ -195,7 +195,7 @@ MM_CopyScanCacheListVLHGC::removeAllHeapAllocatedChunks(MM_EnvironmentVLHGC *env
}

MM_CopyScanCacheVLHGC *
MM_CopyScanCacheListVLHGC::allocateCacheEntriesInExistingMemory(MM_EnvironmentVLHGC *env, void *buffer, UDATA bufferLengthInBytes)
MM_CopyScanCacheListVLHGC::allocateCacheEntriesInExistingMemory(MM_EnvironmentVLHGC *env, void *buffer, uintptr_t bufferLengthInBytes)
{
CopyScanCacheSublist *cacheList = &_sublists[getSublistIndex(env)];
MM_CopyScanCacheVLHGC * result = NULL;
@@ -213,15 +213,15 @@ MM_CopyScanCacheListVLHGC::allocateCacheEntriesInExistingMemory(MM_EnvironmentVL
void
MM_CopyScanCacheListVLHGC::lock()
{
for (UDATA i = 0; i < _sublistCount; ++i) {
for (uintptr_t i = 0; i < _sublistCount; ++i) {
_sublists[i]._cacheLock.acquire();
}
}

void
MM_CopyScanCacheListVLHGC::unlock()
{
for (UDATA i = 0; i < _sublistCount; ++i) {
for (uintptr_t i = 0; i < _sublistCount; ++i) {
_sublists[i]._cacheLock.release();
}
}
@@ -268,11 +268,11 @@ MM_CopyScanCacheListVLHGC::pushCache(MM_EnvironmentVLHGC *env, MM_CopyScanCacheV
MM_CopyScanCacheVLHGC *
MM_CopyScanCacheListVLHGC::popCacheNoLock(MM_EnvironmentVLHGC *env)
{
UDATA indexStart = getSublistIndex(env);
uintptr_t indexStart = getSublistIndex(env);
MM_CopyScanCacheVLHGC *cache = NULL;

for (UDATA i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
UDATA index = (i + indexStart) % _sublistCount;
for (uintptr_t i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
uintptr_t index = (i + indexStart) % _sublistCount;
CopyScanCacheSublist *cacheList = &_sublists[index];
cache = popCacheInternal(env, cacheList);
}
@@ -283,10 +283,10 @@ MM_CopyScanCacheListVLHGC::popCacheNoLock(MM_EnvironmentVLHGC *env)
MM_CopyScanCacheVLHGC *
MM_CopyScanCacheListVLHGC::popCache(MM_EnvironmentVLHGC *env)
{
UDATA indexStart = getSublistIndex(env);
uintptr_t indexStart = getSublistIndex(env);
MM_CopyScanCacheVLHGC *cache = NULL;
for (UDATA i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
UDATA index = (i + indexStart) % _sublistCount;
for (uintptr_t i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
uintptr_t index = (i + indexStart) % _sublistCount;
CopyScanCacheSublist *cacheList = &_sublists[index];
if (NULL != cacheList->_cacheHead) {
cacheList->_cacheLock.acquire();
Expand All @@ -301,17 +301,17 @@ bool
MM_CopyScanCacheListVLHGC::isEmpty()
{
MM_CopyScanCacheVLHGC *cache = NULL;
for (UDATA i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
for (uintptr_t i = 0; (i < _sublistCount) && (NULL == cache); ++i) {
cache = _sublists[i]._cacheHead;
}
return NULL == cache;
}

UDATA
uintptr_t
MM_CopyScanCacheListVLHGC::countCaches()
{
UDATA count = 0;
for (UDATA i = 0; i < _sublistCount; ++i) {
uintptr_t count = 0;
for (uintptr_t i = 0; i < _sublistCount; ++i) {
MM_CopyScanCacheVLHGC *cache = _sublists[i]._cacheHead;
while (NULL != cache) {
count++;
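Both popCache() and popCacheNoLock() use the same search order: start at the calling thread's own sublist (getSublistIndex(), i.e. the environment id modulo the sublist count) and walk the remaining sublists, wrapping around, until one is non-empty. A self-contained sketch of that wrap-around search with toy types (the per-sublist locking that popCache() adds is omitted):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct ToyCache { ToyCache *next; };
    struct ToySublist { ToyCache *head; };

    /* Same wrap-around probe as popCacheNoLock(): begin at the caller's sublist,
     * then try the others in order, modulo the sublist count. */
    ToyCache *popCache(std::vector<ToySublist> &sublists, uintptr_t envId)
    {
        uintptr_t sublistCount = sublists.size();
        uintptr_t indexStart = envId % sublistCount; /* mirrors getSublistIndex() */
        ToyCache *cache = NULL;
        for (uintptr_t i = 0; (i < sublistCount) && (NULL == cache); ++i) {
            uintptr_t index = (i + indexStart) % sublistCount;
            ToySublist &list = sublists[index];
            if (NULL != list.head) {
                cache = list.head;
                list.head = cache->next;
            }
        }
        return cache;
    }

    int main()
    {
        std::vector<ToySublist> sublists(4); /* value-initialized: all heads are NULL */
        ToyCache entry = { NULL };
        sublists[2].head = &entry;
        /* a thread with environment id 5 starts at sublist 1 and finds the cache in sublist 2 */
        printf("found cache: %p\n", (void *)popCache(sublists, 5));
        return 0;
    }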
16 changes: 8 additions & 8 deletions runtime/gc_vlhgc/CopyScanCacheListVLHGC.hpp
@@ -52,19 +52,19 @@ class MM_CopyScanCacheListVLHGC : public MM_BaseVirtual
MM_CopyScanCacheVLHGC *_cacheHead;
MM_LightweightNonReentrantLock _cacheLock;
} *_sublists;
UDATA _sublistCount; /**< the number of lists (split for parallelism). Must be at least 1 */
uintptr_t _sublistCount; /**< the number of lists (split for parallelism). Must be at least 1 */
MM_CopyScanCacheChunkVLHGC *_chunkHead;
UDATA _totalEntryCount;
uintptr_t _totalEntryCount;
bool _containsHeapAllocatedChunks; /**< True if there are heap-allocated scan cache chunks on the _chunkHead list */

private:
bool appendCacheEntries(MM_EnvironmentVLHGC *env, UDATA cacheEntryCount);
bool appendCacheEntries(MM_EnvironmentVLHGC *env, uintptr_t cacheEntryCount);

/**
* Determine the sublist index for the specified thread.
* @return the index (a valid index into _sublists)
*/
UDATA getSublistIndex(MM_EnvironmentVLHGC *env) { return env->getEnvironmentId() % _sublistCount; }
uintptr_t getSublistIndex(MM_EnvironmentVLHGC *env) { return env->getEnvironmentId() % _sublistCount; }

/**
* Add the specified entry to this list. It is the caller's responsibility to provide synchronization.
@@ -92,7 +92,7 @@ class MM_CopyScanCacheListVLHGC : public MM_BaseVirtual
* @param env[in] A GC thread
* @param totalCacheEntryCount[in] The number of cache entries which this list should be resized to contain
*/
bool resizeCacheEntries(MM_EnvironmentVLHGC *env, UDATA totalCacheEntryCount);
bool resizeCacheEntries(MM_EnvironmentVLHGC *env, uintptr_t totalCacheEntryCount);

/**
* Removes any heap-allocated scan cache chunks from the receiver's chunk list (calls tearDown on them after removing them from the list).
@@ -107,7 +107,7 @@ class MM_CopyScanCacheListVLHGC : public MM_BaseVirtual
* @param bufferLengthInBytes[in] The length, in bytes, of the memory extent starting at buffer
* @return A new cache entry if the chunk was successfully allocated and inserted into the receiver's chunk list, NULL if a failure occurred
*/
MM_CopyScanCacheVLHGC * allocateCacheEntriesInExistingMemory(MM_EnvironmentVLHGC *env, void *buffer, UDATA bufferLengthInBytes);
MM_CopyScanCacheVLHGC * allocateCacheEntriesInExistingMemory(MM_EnvironmentVLHGC *env, void *buffer, uintptr_t bufferLengthInBytes);

/**
* Lock the receiver to prevent concurrent modification.
@@ -161,13 +161,13 @@ class MM_CopyScanCacheListVLHGC : public MM_BaseVirtual
* The implementation walks all caches, so this should only be used for debugging.
* @return the total number of caches in the receiver's lists
*/
UDATA countCaches();
uintptr_t countCaches();

/**
* Answer the total number of caches owned by the receiver, whether or not they are currently in the receiver's lists.
* @return total number of caches
*/
UDATA getTotalCacheCount() { return _totalEntryCount; }
uintptr_t getTotalCacheCount() { return _totalEntryCount; }

/**
* Create a CopyScanCacheList object.
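removeAllHeapAllocatedChunks(), documented here and implemented in the .cpp above, filters heap-allocated entries out of singly linked lists by walking with a trailing previous pointer and splicing flagged entries out. A self-contained sketch of that unlink pattern (the flag bit value is a stand-in for OMR_COPYSCAN_CACHE_TYPE_HEAP, whose real value is not shown in this diff):

    #include <cstdint>
    #include <cstdio>

    #define TOY_CACHE_TYPE_HEAP 0x4 /* stand-in bit for OMR_COPYSCAN_CACHE_TYPE_HEAP */

    struct ToyCache {
        uintptr_t flags;
        ToyCache *next;
    };

    /* Same unlink pattern as removeAllHeapAllocatedChunks(): keep a trailing
     * "previous" pointer and splice out every entry whose HEAP flag is set. */
    ToyCache *removeHeapAllocated(ToyCache *head)
    {
        ToyCache *previous = NULL;
        ToyCache *cache = head;
        while (NULL != cache) {
            ToyCache *next = cache->next;
            if (0 != (cache->flags & TOY_CACHE_TYPE_HEAP)) {
                if (NULL == previous) {
                    head = next; /* removing the first element */
                } else {
                    previous->next = next;
                }
            } else {
                previous = cache;
            }
            cache = next;
        }
        return head;
    }

    int main()
    {
        ToyCache c = { TOY_CACHE_TYPE_HEAP, NULL };
        ToyCache b = { 0, &c };
        ToyCache a = { TOY_CACHE_TYPE_HEAP, &b };
        for (ToyCache *it = removeHeapAllocated(&a); NULL != it; it = it->next) {
            printf("kept cache with flags 0x%lx\n", (unsigned long)it->flags);
        }
        return 0;
    }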
14 changes: 7 additions & 7 deletions runtime/gc_vlhgc/CopyScanCacheVLHGC.hpp
@@ -47,12 +47,12 @@ class MM_CopyScanCacheVLHGC : public MM_CopyScanCache
protected:
public:
GC_ObjectIteratorState _objectIteratorState; /**< the scan state of the partially scanned object */
UDATA _compactGroup; /**< The compact group this cache belongs to */
uintptr_t _compactGroup; /**< The compact group this cache belongs to */
double _allocationAgeSizeProduct; /**< sum of (age * size) products for each object copied to this copy cache */
UDATA _objectSize; /**< sum of objects sizes copied to this copy cache */
U_64 _lowerAgeBound; /**< lowest possible age of any object in this copy cache */
U_64 _upperAgeBound; /**< highest possible age of any object in this copy cache */
UDATA _arraySplitIndex; /**< The index within the array in scanCurrent to start scanning from (meaningful is J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY is set) */
uintptr_t _objectSize; /**< sum of objects sizes copied to this copy cache */
uint64_t _lowerAgeBound; /**< lowest possible age of any object in this copy cache */
uint64_t _upperAgeBound; /**< highest possible age of any object in this copy cache */
uintptr_t _arraySplitIndex; /**< The index within the array in scanCurrent to start scanning from (meaningful is OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY is set) */

/* Members Function */
private:
@@ -63,7 +63,7 @@ class MM_CopyScanCacheVLHGC : public MM_CopyScanCache
*/
MMINLINE void clearSplitArray()
{
flags &= ~J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY;
flags &= ~OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY;
_arraySplitIndex = 0;
}

@@ -74,7 +74,7 @@ class MM_CopyScanCacheVLHGC : public MM_CopyScanCache
*/
MMINLINE bool isSplitArray() const
{
return (J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY == (flags & J9VM_MODRON_SCAVENGER_CACHE_TYPE_SPLIT_ARRAY));
return OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY == (flags & OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY);
}

/**
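The split-array helpers in this class are plain mask operations on the flags word: isSplitArray() tests the bit, and clearSplitArray() clears it and resets _arraySplitIndex. A minimal sketch with a stand-in bit value (the real OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY value is defined in OMR and not shown here):

    #include <cstdint>
    #include <cstdio>

    #define TOY_SPLIT_ARRAY 0x8 /* stand-in for OMR_COPYSCAN_CACHE_TYPE_SPLIT_ARRAY */

    struct ToyCache {
        uintptr_t flags;
        uintptr_t arraySplitIndex;
    };

    /* same test as MM_CopyScanCacheVLHGC::isSplitArray() */
    bool isSplitArray(const ToyCache &cache)
    {
        return TOY_SPLIT_ARRAY == (cache.flags & TOY_SPLIT_ARRAY);
    }

    /* same reset as MM_CopyScanCacheVLHGC::clearSplitArray() */
    void clearSplitArray(ToyCache &cache)
    {
        cache.flags &= ~(uintptr_t)TOY_SPLIT_ARRAY;
        cache.arraySplitIndex = 0;
    }

    int main()
    {
        ToyCache cache = { TOY_SPLIT_ARRAY, 42 };
        printf("split before clear: %d\n", (int)isSplitArray(cache));
        clearSplitArray(cache);
        printf("split after clear: %d\n", (int)isSplitArray(cache));
        return 0;
    }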
