Fix crash in prepareToFixMemberNames #18236

Merged
4 commits merged on Oct 6, 2023
2 changes: 1 addition & 1 deletion runtime/bcutil/j9bcu.tdf
@@ -261,7 +261,7 @@ TraceException=Trc_BCU_ClassFileOracle_walkMethodCodeAttributeAttributes_Duplica
TraceAssert=Trc_BCU_Assert_SupportedByteCode NoEnv Overhead=1 Level=1 Assert="(0 != sunJavaInstructionSizeTable[P1])"
TraceEntry=Trc_BCU_avl_intern_verify_EntryV2 Noenv Overhead=1 Level=3 Template="BCU avl_intern_verify Entry: tree=%p sharedPool=%p localPool=%p"

TraceAssert=Trc_Assert_BCU_mustHaveExclusiveVMAccess NoEnv overhead=1 Level=1 Assert="(P1)->publicFlags & (J9_PUBLIC_FLAGS_VM_ACCESS | J9_PUBLIC_FLAGS_HALT_THREAD_EXCLUSIVE)"
TraceAssert=Trc_Assert_BCU_mustHaveExclusiveVMAccess NoEnv overhead=1 Level=1 Assert="(0 != (P1)->omrVMThread->exclusiveCount)"

TraceAssert=Trc_BCU_Assert_InternVerificationFailure NoEnv Overhead=1 Level=1 Assert="(0)"

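The assertion change above tightens what "must have exclusive VM access" means: instead of accepting any thread with VM access or one halted for exclusive, the trace assert now requires the calling thread's own exclusiveCount to be non-zero, which (with the VMAccess.cpp changes later in this PR) also holds for a thread that owns safe-point access. The standalone sketch below only models that check; the struct and field names are illustrative stand-ins, not the real J9VMThread/OMR_VMThread layout.

/* Minimal standalone model of the revised assertion (illustrative, not OpenJ9 code). */
#include <cassert>
#include <cstdint>

struct ModelThread {
	uint32_t publicFlags;    /* the old assertion inspected halt/access bits here */
	uint32_t exclusiveCount; /* recursion count of exclusive VM access */
};

static void assertMustHaveExclusiveVMAccess(const ModelThread *thread)
{
	/* New-style check: the thread itself must hold exclusive access.  With the
	 * VMAccess.cpp changes in this PR, a thread holding safe-point access also
	 * has exclusiveCount >= 1, so it passes as well. */
	assert(0 != thread->exclusiveCount);
}

int main()
{
	ModelThread thread = { 0, 1 }; /* pretend exclusive access is held once */
	assertMustHaveExclusiveVMAccess(&thread);
	return 0;
}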
64 changes: 33 additions & 31 deletions runtime/gc_base/ClassLoaderManager.cpp
@@ -633,17 +633,17 @@ bool
MM_ClassLoaderManager::tryEnterClassUnloadMutex(MM_EnvironmentBase *env)
{
bool result = true;

/* now, perform any actions that the global collector needs to take before a collection starts */
if (!_javaVM->isClassUnloadMutexHeldForRedefinition) {
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex))
#else
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex))
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
{
/* The JIT currently is in the monitor */
/* We can simply skip unloading classes for this GC */
result = false;
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex))
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex))
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
{
/* The JIT currently is in the monitor */
/* We can simply skip unloading classes for this GC */
result = false;
}
}
return result;
}
@@ -653,25 +653,25 @@ MM_ClassLoaderManager::enterClassUnloadMutex(MM_EnvironmentBase *env)
{
PORT_ACCESS_FROM_ENVIRONMENT(env);
U_64 quiesceTime = J9CONST64(0);

/* now, perform any actions that the global collector needs to take before a collection starts */
if (!_javaVM->isClassUnloadMutexHeldForRedefinition) {
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex))
#else
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex))
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
{
/* The JIT currently is in the monitor */
/* We must interrupt the JIT compilation so the GC can unload classes */
U_64 startTime = j9time_hires_clock();
TRIGGER_J9HOOK_MM_INTERRUPT_COMPILATION(_extensions->hookInterface, (J9VMThread *)env->getLanguageVMThread());
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex))
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex))
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
{
/* The JIT currently is in the monitor */
/* We must interrupt the JIT compilation so the GC can unload classes */
U_64 startTime = j9time_hires_clock();
TRIGGER_J9HOOK_MM_INTERRUPT_COMPILATION(_extensions->hookInterface, (J9VMThread *)env->getLanguageVMThread());
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
omrthread_rwmutex_enter_write(_javaVM->classUnloadMutex);
#else
omrthread_monitor_enter(_javaVM->classUnloadMutex);
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
U_64 endTime = j9time_hires_clock();
quiesceTime = j9time_hires_delta(startTime, endTime, J9PORT_TIME_DELTA_IN_MICROSECONDS);
omrthread_rwmutex_enter_write(_javaVM->classUnloadMutex);
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
omrthread_monitor_enter(_javaVM->classUnloadMutex);
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
U_64 endTime = j9time_hires_clock();
quiesceTime = j9time_hires_delta(startTime, endTime, J9PORT_TIME_DELTA_IN_MICROSECONDS);
}
}
return quiesceTime;
}
@@ -680,11 +680,13 @@ void
MM_ClassLoaderManager::exitClassUnloadMutex(MM_EnvironmentBase *env)
{
/* If we allowed class unloading during this gc, we must release the classUnloadMutex */
if (!_javaVM->isClassUnloadMutexHeldForRedefinition) {
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
omrthread_rwmutex_exit_write(_javaVM->classUnloadMutex);
#else
omrthread_monitor_exit(_javaVM->classUnloadMutex);
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
omrthread_rwmutex_exit_write(_javaVM->classUnloadMutex);
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
omrthread_monitor_exit(_javaVM->classUnloadMutex);
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
}
}

#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
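The three functions above all gain the same guard: when class redefinition already holds the class unload mutex, the GC neither tries to acquire it nor releases it. Below is a reduced, self-contained sketch of that guard; std::mutex and the helper names stand in for the real omrthread monitor/rwmutex calls and are not OpenJ9 APIs.

/* Reduced model of the guarded enter/exit pattern above (illustrative only). */
#include <mutex>

struct ModelVM {
	std::mutex classUnloadMutex;
	bool isClassUnloadMutexHeldForRedefinition = false;
};

/* Mirrors tryEnterClassUnloadMutex(): returns false ("skip class unloading for
 * this GC") only when the JIT holds the mutex.  When redefinition holds it,
 * the existing hold is treated as the GC's own and no acquisition is attempted. */
static bool tryEnterClassUnload(ModelVM &vm)
{
	bool result = true;
	if (!vm.isClassUnloadMutexHeldForRedefinition) {
		result = vm.classUnloadMutex.try_lock();
	}
	return result;
}

/* Mirrors exitClassUnloadMutex(): only called after a successful enter, so the
 * mutex is released exactly when this code path acquired it. */
static void exitClassUnload(ModelVM &vm)
{
	if (!vm.isClassUnloadMutexHeldForRedefinition) {
		vm.classUnloadMutex.unlock();
	}
}

int main()
{
	ModelVM vm;
	if (tryEnterClassUnload(vm)) {
		/* ...unload classes... */
		exitClassUnload(vm);
	}
	return 0;
}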
36 changes: 20 additions & 16 deletions runtime/gc_glue_java/MetronomeDelegate.cpp
@@ -740,20 +740,22 @@ void
MM_MetronomeDelegate::lockClassUnloadMonitor(MM_EnvironmentRealtime *env)
{
/* Grab the classUnloadMonitor so that the JIT and the GC will not interfere with each other */
if (!_javaVM->isClassUnloadMutexHeldForRedefinition) {
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex)) {
#else
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex)) {
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
/* Failed acquire the monitor so interrupt the JIT. This will allow the GC
* to continue unloading classes.
*/
TRIGGER_J9HOOK_MM_INTERRUPT_COMPILATION(_extensions->hookInterface, (J9VMThread *)env->getLanguageVMThread());
if (0 != omrthread_rwmutex_try_enter_write(_javaVM->classUnloadMutex)) {
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
if (0 != omrthread_monitor_try_enter(_javaVM->classUnloadMutex)) {
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
/* Failed acquire the monitor so interrupt the JIT. This will allow the GC
* to continue unloading classes.
*/
TRIGGER_J9HOOK_MM_INTERRUPT_COMPILATION(_extensions->hookInterface, (J9VMThread *)env->getLanguageVMThread());
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
omrthread_rwmutex_enter_write(_javaVM->classUnloadMutex);
#else
omrthread_monitor_enter(_javaVM->classUnloadMutex);
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
omrthread_rwmutex_enter_write(_javaVM->classUnloadMutex);
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
omrthread_monitor_enter(_javaVM->classUnloadMutex);
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
}
}
}

@@ -763,11 +765,13 @@ MM_MetronomeDelegate::lockClassUnloadMonitor(MM_EnvironmentRealtime *env)
void
MM_MetronomeDelegate::unlockClassUnloadMonitor(MM_EnvironmentRealtime *env)
{
if (!_javaVM->isClassUnloadMutexHeldForRedefinition) {
#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
omrthread_rwmutex_exit_write(_javaVM->classUnloadMutex);
#else
omrthread_monitor_exit(_javaVM->classUnloadMutex);
#endif /* J9VM_JIT_CLASS_UNLOAD_RWMONITOR */
omrthread_rwmutex_exit_write(_javaVM->classUnloadMutex);
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
omrthread_monitor_exit(_javaVM->classUnloadMutex);
#endif /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
}
}

void
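When the mutex is not held for redefinition, lockClassUnloadMonitor() keeps its existing behaviour: try-lock first, and if the JIT is inside, signal it to abandon the in-progress compilation and then block until the mutex is free. The sketch below models that sequence with illustrative names only; requestCompilationInterrupt() stands in for the TRIGGER_J9HOOK_MM_INTERRUPT_COMPILATION hook and nothing here is an OpenJ9 API.

/* Standalone model of the "interrupt the JIT, then block" acquisition above. */
#include <atomic>
#include <mutex>

static std::mutex classUnloadMutex;
static std::atomic<bool> compilationInterruptRequested{false};

static void requestCompilationInterrupt()
{
	compilationInterruptRequested.store(true, std::memory_order_release);
}

static void lockClassUnloadForGC(bool heldForRedefinition)
{
	if (heldForRedefinition) {
		return; /* redefinition already holds the mutex on this GC's behalf */
	}
	if (!classUnloadMutex.try_lock()) {
		/* The JIT is inside the mutex: ask it to stop, then wait our turn so
		 * the GC can proceed with class unloading. */
		requestCompilationInterrupt();
		classUnloadMutex.lock();
	}
}

static void unlockClassUnloadForGC(bool heldForRedefinition)
{
	if (!heldForRedefinition) {
		classUnloadMutex.unlock();
	}
}

int main()
{
	lockClassUnloadForGC(false);
	unlockClassUnloadForGC(false);
	return 0;
}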
17 changes: 16 additions & 1 deletion runtime/jvmti/jvmtiClass.c
@@ -1208,7 +1208,11 @@ redefineClassesCommon(jvmtiEnv* env,
* We can update the VM data structures without any inconsistencies being observed by
* concurrently running compilations. Afterward, resuming compilations will be interrupted
* if necessary before they can observe an inconsistency.
*
* NOTE: The logic below can cause GC to run, and GC could decide to unload classes.
* To avoid deadlock, prevent GC from redundantly entering the class unload mutex.
*/
vm->isClassUnloadMutexHeldForRedefinition = TRUE;

if (J9_ARE_ANY_BITS_SET(vm->runtimeFlags, J9_RUNTIME_DYNAMIC_HEAPIFICATION)) {
/* Look for stack-allocated objects only if the JIT is enabled and not running in FSD mode */
@@ -1225,8 +1229,17 @@ redefineClassesCommon(jvmtiEnv* env,
rc = determineClassesToRecreate(currentThread, class_count, specifiedClasses, &classPairs,
&methodPairs, jitEventDataPtr, !extensionsEnabled);
if (rc == JVMTI_ERROR_NONE) {
/* Identify the MemberNames needing fix-up based on classPairs. */
#if defined(J9VM_OPT_OPENJDK_METHODHANDLE)
/* Eliminate dark matter so that none will be encountered in prepareToFixMemberNames(). */
UDATA savedAllowUserHeapWalkFlag = vm->requiredDebugAttributes & J9VM_DEBUG_ATTRIBUTE_ALLOW_USER_HEAP_WALK;
vm->requiredDebugAttributes |= J9VM_DEBUG_ATTRIBUTE_ALLOW_USER_HEAP_WALK;
vm->memoryManagerFunctions->j9gc_modron_global_collect_with_overrides(currentThread, J9MMCONSTANT_EXPLICIT_GC_EXCLUSIVE_VMACCESS_ALREADY_ACQUIRED);
if (0 == savedAllowUserHeapWalkFlag) {
/* Clear the flag to restore its original value. */
vm->requiredDebugAttributes &= ~J9VM_DEBUG_ATTRIBUTE_ALLOW_USER_HEAP_WALK;
}

/* Identify the MemberNames needing fix-up based on classPairs. */
memberNamesToFix = prepareToFixMemberNames(currentThread, classPairs);
#endif /* defined(J9VM_OPT_OPENJDK_METHODHANDLE) */

@@ -1363,6 +1376,8 @@ redefineClassesCommon(jvmtiEnv* env,

hashTableFree(classPairs);

vm->isClassUnloadMutexHeldForRedefinition = FALSE;

#if defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR)
omrthread_rwmutex_exit_write(vm->classUnloadMutex);
#else /* defined(J9VM_JIT_CLASS_UNLOAD_RWMONITOR) */
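Two idioms in the hunks above are worth calling out: the redefinition path brackets its work with isClassUnloadMutexHeldForRedefinition = TRUE/FALSE while it owns the class unload mutex, and it temporarily ORs in J9VM_DEBUG_ATTRIBUTE_ALLOW_USER_HEAP_WALK so the forced global collection leaves no dark matter for prepareToFixMemberNames() to trip over, clearing the bit afterwards only if it was not already set. A reduced sketch of the save/set/restore idiom follows; the bit value, forceGlobalGC() and the surrounding names are illustrative, not the real OpenJ9 definitions.

/* Standalone model of the flag save/set/restore idiom used above (illustrative only). */
#include <cassert>
#include <cstdint>

static const uint32_t ALLOW_USER_HEAP_WALK = 0x1; /* stands in for J9VM_DEBUG_ATTRIBUTE_ALLOW_USER_HEAP_WALK */

static void forceGlobalGC() { /* stand-in for the forced global collection call */ }

static void collectWithHeapWalkEnabled(uint32_t &requiredDebugAttributes)
{
	/* Remember whether the bit was already set before forcing it on. */
	uint32_t saved = requiredDebugAttributes & ALLOW_USER_HEAP_WALK;
	requiredDebugAttributes |= ALLOW_USER_HEAP_WALK;
	forceGlobalGC(); /* eliminates dark matter while the heap is walkable */
	if (0 == saved) {
		/* The bit was ours: clear it to restore the original value. */
		requiredDebugAttributes &= ~ALLOW_USER_HEAP_WALK;
	}
}

int main()
{
	uint32_t attrs = 0;
	collectWithHeapWalkEnabled(attrs);
	assert(0 == (attrs & ALLOW_USER_HEAP_WALK)); /* original value restored */
	return 0;
}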
10 changes: 10 additions & 0 deletions runtime/oti/j9nonbuilder.h
@@ -5773,6 +5773,16 @@ typedef struct J9JavaVM {
#else
omrthread_monitor_t classUnloadMutex;
#endif
/* When GC runs as part of class redefinition, it could unload classes and
* therefore try to acquire the class unload mutex, which is already held
* to prevent JIT compilation from running concurrently with redefinition.
* Such an acquisition attempt could deadlock if it occurs on a different
* thread, even though the acquisition would be morally recursive.
Contributor: Perhaps "morally" should be "normally" here?

Contributor (author, in reply): An acquisition that occurs on a different thread is technically never a recursive acquisition. However, in this case it's tantamount to one, and the fact that it's occurring on another thread is only a technicality. So I meant morally recursive as opposed to technically recursive.

*
* isClassUnloadMutexHeldForRedefinition allows GC to detect this situation
* and skip acquiring the mutex.
*/
BOOLEAN isClassUnloadMutexHeldForRedefinition;
UDATA java2J9ThreadPriorityMap[11];
UDATA j9Thread2JavaPriorityMap[12];
UDATA priorityAsyncEventDispatch;
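The comment above describes an acquisition that is recursive in spirit but not in mechanism, because the GC work triggered by redefinition may run on a different thread than the one holding the mutex. The toy program below reproduces that shape with std::mutex and std::thread (illustrative only, not OpenJ9 code): with the flag set, the GC task skips the acquire and the program finishes; without it, the lock() call would block forever.

/* Toy two-thread model of the deadlock this flag prevents (illustrative only).
 * The flag is written before the GC thread starts and cleared after join(),
 * so no extra synchronization is needed in this small example. */
#include <iostream>
#include <mutex>
#include <thread>

static std::mutex classUnloadMutex;
static bool isClassUnloadMutexHeldForRedefinition = false;

static void gcTask()
{
	if (!isClassUnloadMutexHeldForRedefinition) {
		/* In the problem scenario this lock() would never return, because the
		 * redefinition thread holds the mutex and is waiting on this task. */
		classUnloadMutex.lock();
		classUnloadMutex.unlock();
	}
	std::cout << "GC completed without re-acquiring the mutex\n";
}

int main()
{
	classUnloadMutex.lock();                      /* redefinition takes the mutex   */
	isClassUnloadMutexHeldForRedefinition = true; /* ...and records that it owns it */

	std::thread gc(gcTask); /* GC triggered by redefinition, on another thread */
	gc.join();              /* redefinition cannot continue until GC finishes  */

	isClassUnloadMutexHeldForRedefinition = false;
	classUnloadMutex.unlock();
	return 0;
}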
59 changes: 40 additions & 19 deletions runtime/vm/VMAccess.cpp
@@ -128,7 +128,6 @@ acquireExclusiveVMAccess(J9VMThread * vmThread)
Assert_VM_true(currentVMThread(vm) == vmThread);
}
Assert_VM_mustHaveVMAccess(vmThread);
Assert_VM_true(0 == vmThread->safePointCount);
#if defined(J9VM_INTERP_ATOMIC_FREE_JNI)
// Current thread must have entered the VM before acquiring exclusive
Assert_VM_false(vmThread->inNative);
@@ -138,6 +137,9 @@
* the thread already has exclusive access
*/
if ( ++(vmThread->omrVMThread->exclusiveCount) == 1 ) {
/* If we have safe-point access then exclusiveCount should have already been positive. */
Assert_VM_true(0 == vmThread->safePointCount);

omrthread_monitor_enter(vmThread->publicFlagsMutex);
if (J9_ARE_NO_BITS_SET(vmThread->publicFlags, J9_PUBLIC_FLAGS_NOT_AT_SAFE_POINT)) {
VM_VMAccess::setPublicFlags(vmThread, J9_PUBLIC_FLAGS_NOT_AT_SAFE_POINT | J9_PUBLIC_FLAGS_EXCLUSIVE_SET_NOT_SAFE);
@@ -327,7 +329,7 @@ acquireExclusiveVMAccess(J9VMThread * vmThread)

vm->omrVM->exclusiveVMAccessStats.endTime = j9time_hires_clock();
}
Assert_VM_true(J9_XACCESS_EXCLUSIVE == vm->exclusiveAccessState);
Assert_VM_true((J9_XACCESS_EXCLUSIVE == vm->exclusiveAccessState) || (J9_XACCESS_EXCLUSIVE == vm->safePointState));
Trc_VM_acquireExclusiveVMAccess_Exit(vmThread);
}

@@ -487,6 +489,21 @@ void internalReleaseVMAccessSetStatus(J9VMThread * vmThread, UDATA flags)
VM_VMAccess::inlineReleaseVMAccessSetStatus(vmThread, flags);
}

/** @brief Free any cached decompilation record and empty the UTF cache.
*
* @param[in] currentThread the J9VMThread in which to clear caches
*/
static void clearThreadLocalCachesPostExclusive(J9VMThread *currentThread)
{
PORT_ACCESS_FROM_JAVAVM(currentThread->javaVM);
j9mem_free_memory(currentThread->lastDecompilation);
currentThread->lastDecompilation = NULL;
J9HashTable *utfCache = currentThread->utfCache;
if (NULL != utfCache) {
currentThread->utfCache = NULL;
hashTableFree(utfCache);
}
}

void releaseExclusiveVMAccess(J9VMThread * vmThread)
{
@@ -498,9 +515,12 @@
}
Assert_VM_mustHaveVMAccess(vmThread);
Assert_VM_false(vmThread->omrVMThread->exclusiveCount == 0);
Assert_VM_true(J9_XACCESS_EXCLUSIVE == vm->exclusiveAccessState);
Assert_VM_true((J9_XACCESS_EXCLUSIVE == vm->exclusiveAccessState) || (J9_XACCESS_EXCLUSIVE == vm->safePointState));

if (--(vmThread->omrVMThread->exclusiveCount) == 0) {
/* If we have safe-point access then non-recursive release should happen in releaseSafePointVMAccess(). */
Assert_VM_true(0 == vmThread->safePointCount);

/* Acquire these monitors in the same order as in allocateVMThread to prevent deadlock */

/* Check the exclusive access queue */
@@ -598,25 +618,13 @@ void releaseExclusiveVMAccess(J9VMThread * vmThread)
/* Make sure stale thread pointers don't exist in the stats */
vm->omrVM->exclusiveVMAccessStats.requester = NULL;
vm->omrVM->exclusiveVMAccessStats.lastResponder = NULL;
/* Free any cached decompilation records and empty the UTF cache */
PORT_ACCESS_FROM_JAVAVM(vm);
j9mem_free_memory(currentThread->lastDecompilation);
currentThread->lastDecompilation = NULL;
J9HashTable *utfCache = currentThread->utfCache;
if (NULL != utfCache) {
currentThread->utfCache = NULL;
hashTableFree(utfCache);
}

clearThreadLocalCachesPostExclusive(currentThread);
while ((currentThread = currentThread->linkNext) != vmThread) {
j9mem_free_memory(currentThread->lastDecompilation);
currentThread->lastDecompilation = NULL;
J9HashTable *utfCache = currentThread->utfCache;
if (NULL != utfCache) {
currentThread->utfCache = NULL;
hashTableFree(utfCache);
}
clearThreadLocalCachesPostExclusive(currentThread);
VM_VMAccess::clearPublicFlags(currentThread, J9_PUBLIC_FLAGS_HALT_THREAD_EXCLUSIVE | J9_PUBLIC_FLAGS_NOT_COUNTED_BY_EXCLUSIVE);
}

omrthread_monitor_notify_all(vm->exclusiveAccessMutex);
omrthread_monitor_exit(vm->exclusiveAccessMutex);
omrthread_monitor_exit(vmThread->publicFlagsMutex);
@@ -1114,7 +1122,15 @@
* the thread already has exclusive access
*/
if ( ++(vmThread->safePointCount) == 1 ) {
/* When first acquiring safe-point access, we had better not already have exclusive access.
* If we do, then all threads are stopped, but they're not necessarily all stopped at safe
* points, and getting them all to safe points would require releasing exclusive.
*/
Assert_VM_true(0 == vmThread->omrVMThread->exclusiveCount);

/* On the other hand, once we acquire safe-point access, we'll also have exclusive access. */
vmThread->omrVMThread->exclusiveCount = 1;

internalReleaseVMAccess(vmThread);
omrthread_monitor_enter(vm->exclusiveAccessMutex);
while(J9_XACCESS_NONE != vm->safePointState) {
@@ -1226,13 +1242,18 @@
}
Assert_VM_mustHaveVMAccess(vmThread);
Assert_VM_false(vmThread->safePointCount == 0);
Assert_VM_true(1 == vmThread->omrVMThread->exclusiveCount);
Assert_VM_true(J9_XACCESS_EXCLUSIVE == vm->safePointState);

if (--(vmThread->safePointCount) == 0) {
J9VMThread *currentThread = vmThread;
do {
clearThreadLocalCachesPostExclusive(currentThread);
VM_VMAccess::clearPublicFlags(currentThread, J9_PUBLIC_FLAGS_HALTED_AT_SAFE_POINT | J9_PUBLIC_FLAGS_NOT_COUNTED_BY_SAFE_POINT, true);
} while ((currentThread = currentThread->linkNext) != vmThread);

vmThread->omrVMThread->exclusiveCount = 0;

omrthread_monitor_enter(vm->exclusiveAccessMutex);
vm->safePointState = J9_XACCESS_NONE;
omrthread_monitor_notify_all(vm->exclusiveAccessMutex);
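Taken together, the VMAccess.cpp changes make safe-point access imply exclusive access at the counter level: acquireSafePointVMAccess() sets exclusiveCount to 1 on first acquisition, the relaxed asserts accept J9_XACCESS_EXCLUSIVE on either exclusiveAccessState or safePointState, and releaseSafePointVMAccess() clears the thread-local caches and the count on final release. The reduced model below captures only that counter discipline; it omits the real halting and resuming machinery, and its names are illustrative rather than the actual OpenJ9 functions.

/* Reduced model of the safe-point/exclusive counter handshake (illustrative only). */
#include <cassert>

struct ModelThread {
	unsigned exclusiveCount = 0;
	unsigned safePointCount = 0;
};

static void acquireSafePoint(ModelThread &t)
{
	if (1 == ++t.safePointCount) {
		assert(0 == t.exclusiveCount); /* must not already hold exclusive access */
		/* ...halt other threads at safe points (omitted)... */
		t.exclusiveCount = 1;          /* safe-point access now implies exclusive */
	}
}

static void acquireExclusive(ModelThread &t)
{
	if (1 == ++t.exclusiveCount) {
		assert(0 == t.safePointCount); /* first acquisition: no safe point held */
		/* ...halt other threads (omitted)... */
	}
}

static void releaseExclusive(ModelThread &t)
{
	if (0 == --t.exclusiveCount) {
		assert(0 == t.safePointCount); /* final release happens via the safe point path */
		/* ...resume other threads (omitted)... */
	}
}

static void releaseSafePoint(ModelThread &t)
{
	if (0 == --t.safePointCount) {
		assert(1 == t.exclusiveCount);
		t.exclusiveCount = 0;
		/* ...clear caches and resume other threads (omitted)... */
	}
}

int main()
{
	ModelThread t;
	acquireSafePoint(t);
	acquireExclusive(t); /* recursive: only bumps exclusiveCount to 2 */
	releaseExclusive(t); /* back to 1; the outer safe point still holds it */
	releaseSafePoint(t);
	return 0;
}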