diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index 00687d21a7e..16837da9cf2 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -407,9 +407,6 @@ void ClassLoaderData::methods_do(void f(Method*)) { } void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { - // To call this, one must have the MultiArray_lock held, but the _klasses list still has lock free reads. - assert_locked_or_safepoint(MultiArray_lock); - // Lock-free access requires load_acquire for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 573df165d0d..22ec4d942a6 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2021 SAP SE. All rights reserved. * Copyright (c) 2023, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -858,6 +858,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, assert(false, "Should not allocate with exception pending"); return nullptr; // caller does a CHECK_NULL too } + assert(!THREAD->owns_locks(), "allocating metaspace while holding mutex"); MetaWord* result = allocate(loader_data, word_size, type); diff --git a/src/hotspot/share/oops/arrayKlass.cpp b/src/hotspot/share/oops/arrayKlass.cpp index 60382daf9a4..3b97b25da6c 100644 --- a/src/hotspot/share/oops/arrayKlass.cpp +++ b/src/hotspot/share/oops/arrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,26 +130,21 @@ ArrayKlass* ArrayKlass::array_klass(int n, TRAPS) { // lock-free read needs acquire semantics if (higher_dimension_acquire() == nullptr) { - ResourceMark rm(THREAD); - { - // Ensure atomic creation of higher dimensions - MutexLocker mu(THREAD, MultiArray_lock); + // Ensure atomic creation of higher dimensions + RecursiveLocker rl(MultiArray_lock, THREAD); - // Check if another thread beat us - if (higher_dimension() == nullptr) { - - // Create multi-dim klass object and link them together - ObjArrayKlass* ak = + if (higher_dimension() == nullptr) { + // Create multi-dim klass object and link them together + ObjArrayKlass* ak = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), dim + 1, this, CHECK_NULL); - ak->set_lower_dimension(this); - // use 'release' to pair with lock-free load - release_set_higher_dimension(ak); - assert(ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass"); - } + // use 'release' to pair with lock-free load + release_set_higher_dimension(ak); + assert(ak->lower_dimension() == this, "lower dimension mismatch"); } } - ObjArrayKlass 
*ak = higher_dimension(); + ObjArrayKlass* ak = higher_dimension(); + assert(ak != nullptr, "should be set"); THREAD->check_possible_safepoint(); return ak->array_klass(n, THREAD); } diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index e236e583d24..c02acd97cb6 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1545,23 +1545,22 @@ void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) { ArrayKlass* InstanceKlass::array_klass(int n, TRAPS) { // Need load-acquire for lock-free read if (array_klasses_acquire() == nullptr) { - ResourceMark rm(THREAD); - JavaThread *jt = THREAD; - { - // Atomic creation of array_klasses - MutexLocker ma(THREAD, MultiArray_lock); - // Check if update has already taken place - if (array_klasses() == nullptr) { - ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL); - // use 'release' to pair with lock-free load - release_set_array_klasses(k); - } + // Recursively lock array allocation + RecursiveLocker rl(MultiArray_lock, THREAD); + + // Check if another thread created the array klass while we were waiting for the lock. 
+ if (array_klasses() == nullptr) { + ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL); + // use 'release' to pair with lock-free load + release_set_array_klasses(k); } } + // array_klasses() will always be set at this point - ObjArrayKlass* oak = array_klasses(); - return oak->array_klass(n, THREAD); + ObjArrayKlass* ak = array_klasses(); + assert(ak != nullptr, "should be set"); + return ak->array_klass(n, THREAD); } ArrayKlass* InstanceKlass::array_klass_or_null(int n) { @@ -2762,7 +2761,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl if (array_klasses() != nullptr) { // To get a consistent list of classes we need MultiArray_lock to ensure // array classes aren't observed while they are being restored. - MutexLocker ml(MultiArray_lock); + RecursiveLocker rl(MultiArray_lock, THREAD); assert(this == array_klasses()->bottom_klass(), "sanity"); // Array classes have null protection domain. // --> see ArrayKlass::complete_create_array_klass() diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp index d7cba58b6b3..b477948917e 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,36 +60,19 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da // Eagerly allocate the direct array supertype. 
Klass* super_klass = nullptr; if (!Universe::is_bootstrapping() || vmClasses::Object_klass_loaded()) { + assert(MultiArray_lock->holds_lock(THREAD), "must hold lock after bootstrapping"); Klass* element_super = element_klass->super(); if (element_super != nullptr) { // The element type has a direct super. E.g., String[] has direct super of Object[]. - super_klass = element_super->array_klass_or_null(); - bool supers_exist = super_klass != nullptr; // Also, see if the element has secondary supertypes. - // We need an array type for each. + // We need an array type for each before creating this array type. + super_klass = element_super->array_klass(CHECK_NULL); const Array* element_supers = element_klass->secondary_supers(); - for( int i = element_supers->length()-1; i >= 0; i-- ) { + for (int i = element_supers->length() - 1; i >= 0; i--) { Klass* elem_super = element_supers->at(i); - if (elem_super->array_klass_or_null() == nullptr) { - supers_exist = false; - break; - } - } - if (!supers_exist) { - // Oops. Not allocated yet. Back out, allocate it, and retry. - Klass* ek = nullptr; - { - MutexUnlocker mu(MultiArray_lock); - super_klass = element_super->array_klass(CHECK_NULL); - for( int i = element_supers->length()-1; i >= 0; i-- ) { - Klass* elem_super = element_supers->at(i); - elem_super->array_klass(CHECK_NULL); - } - // Now retry from the beginning - ek = element_klass->array_klass(n, CHECK_NULL); - } // re-lock - return ObjArrayKlass::cast(ek); + elem_super->array_klass(CHECK_NULL); } + // Fall through because inheritance is acyclic and we hold the global recursive lock to allocate all the arrays. } else { // The element type is already Object. Object[] has direct super of Object. 
super_klass = vmClasses::Object_klass(); @@ -150,6 +133,10 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK set_bottom_klass(bk); set_class_loader_data(bk->class_loader_data()); + if (element_klass->is_array_klass()) { + set_lower_dimension(ArrayKlass::cast(element_klass)); + } + set_layout_helper(array_layout_helper(T_OBJECT)); assert(is_array_klass(), "sanity"); assert(is_objArray_klass(), "sanity"); diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index 0a436b2778e..2afbe720732 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -3146,9 +3146,6 @@ bool JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample() { return false; } - if (MultiArray_lock->owner() == thread) { - return false; - } return true; } diff --git a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp index ce3ce616c09..8d6a0736afe 100644 --- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp +++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,10 +102,6 @@ JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jcla LoadedClassesClosure closure(env, false); { - // To get a consistent list of classes we need MultiArray_lock to ensure - // array classes aren't created. 
- MutexLocker ma(MultiArray_lock); - // Iterate through all classes in ClassLoaderDataGraph // and collect them using the LoadedClassesClosure MutexLocker mcld(ClassLoaderDataGraph_lock); @@ -122,8 +118,9 @@ JvmtiGetLoadedClasses::getClassLoaderClasses(JvmtiEnv *env, jobject initiatingLo LoadedClassesClosure closure(env, true); { // To get a consistent list of classes we need MultiArray_lock to ensure - // array classes aren't created during this walk. - MutexLocker ma(MultiArray_lock); + // array classes aren't created by another thread during this walk. This walks through the + // InstanceKlass::_array_klasses links. + RecursiveLocker ma(MultiArray_lock, Thread::current()); MutexLocker sd(SystemDictionary_lock); oop loader = JNIHandles::resolve(initiatingLoader); // All classes loaded from this loader as initiating loader are diff --git a/src/hotspot/share/runtime/mutex.cpp b/src/hotspot/share/runtime/mutex.cpp index 3fd7fbd8300..6466d18e538 100644 --- a/src/hotspot/share/runtime/mutex.cpp +++ b/src/hotspot/share/runtime/mutex.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "runtime/os.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/safepointMechanism.inline.hpp" +#include "runtime/semaphore.inline.hpp" #include "runtime/threadCrashProtection.hpp" #include "utilities/events.hpp" #include "utilities/macros.hpp" @@ -522,3 +523,33 @@ void Mutex::set_owner_implementation(Thread *new_owner) { } } #endif // ASSERT + + +RecursiveMutex::RecursiveMutex() : _sem(1), _owner(nullptr), _recursions(0) {} + +void RecursiveMutex::lock(Thread* current) { + assert(current == Thread::current(), "must be current thread"); + if (current == _owner) { + _recursions++; + } else { + // Can be called from JVMTI by the VMThread. + if (current->is_Java_thread()) { + _sem.wait_with_safepoint_check(JavaThread::cast(current)); + } else { + _sem.wait(); + } + _recursions++; + assert(_recursions == 1, "should be"); + _owner = current; + } +} + +void RecursiveMutex::unlock(Thread* current) { + assert(current == Thread::current(), "must be current thread"); + assert(current == _owner, "must be owner"); + _recursions--; + if (_recursions == 0) { + _owner = nullptr; + _sem.signal(); + } +} diff --git a/src/hotspot/share/runtime/mutex.hpp b/src/hotspot/share/runtime/mutex.hpp index 10671d651b1..8818447e5dd 100644 --- a/src/hotspot/share/runtime/mutex.hpp +++ b/src/hotspot/share/runtime/mutex.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "runtime/atomic.hpp" +#include "runtime/semaphore.hpp" #if defined(LINUX) || defined(AIX) || defined(BSD) # include "mutex_posix.hpp" @@ -241,4 +242,24 @@ class PaddedMonitor : public Monitor { PaddedMonitor(Rank rank, const char *name) : Monitor(rank, name) {}; }; +// RecursiveMutex is a minimal implementation, and has no safety and rank checks that Mutex has. +// There are also no checks that the recursive lock is not held when going to Java or to JNI, like +// other JVM mutexes have. This should be used only for cases where the alternatives with all the +// nice safety features don't work. +// Waiting on the RecursiveMutex participates in the safepoint protocol if the current thread is a Java thread, +// (i.e. waiting sets JavaThread to blocked) +class RecursiveMutex : public CHeapObj { + Semaphore _sem; + Thread* _owner; + int _recursions; + + NONCOPYABLE(RecursiveMutex); + public: + RecursiveMutex(); + void lock(Thread* current); + void unlock(Thread* current); + // For use in asserts + bool holds_lock(Thread* current) { return _owner == current; } +}; + #endif // SHARE_RUNTIME_MUTEX_HPP diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index efd8490cd80..09976493fe9 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,6 @@ Monitor* Compilation_lock = nullptr; Mutex* CompileTaskAlloc_lock = nullptr; Mutex* CompileStatistics_lock = nullptr; Mutex* DirectivesStack_lock = nullptr; -Mutex* MultiArray_lock = nullptr; Monitor* Terminator_lock = nullptr; Monitor* InitCompleted_lock = nullptr; Monitor* BeforeExit_lock = nullptr; @@ -156,6 +155,8 @@ Monitor* JVMCI_lock = nullptr; Monitor* JVMCIRuntime_lock = nullptr; #endif +// Only one RecursiveMutex +RecursiveMutex* MultiArray_lock = nullptr; #define MAX_NUM_MUTEX 128 static Mutex* _mutex_array[MAX_NUM_MUTEX]; @@ -269,7 +270,6 @@ void mutex_init() { MUTEX_DEFN(MethodCompileQueue_lock , PaddedMonitor, safepoint); MUTEX_DEFN(CompileStatistics_lock , PaddedMutex , safepoint); MUTEX_DEFN(DirectivesStack_lock , PaddedMutex , nosafepoint); - MUTEX_DEFN(MultiArray_lock , PaddedMutex , safepoint); MUTEX_DEFN(JvmtiThreadState_lock , PaddedMutex , safepoint); // Used by JvmtiThreadState/JvmtiEventController MUTEX_DEFN(EscapeBarrier_lock , PaddedMonitor, nosafepoint); // Used to synchronize object reallocation/relocking triggered by JVMTI @@ -283,6 +283,7 @@ void mutex_init() { MUTEX_DEFN(PeriodicTask_lock , PaddedMonitor, safepoint, true); MUTEX_DEFN(RedefineClasses_lock , PaddedMonitor, safepoint); MUTEX_DEFN(Verify_lock , PaddedMutex , safepoint); + MUTEX_DEFN(ClassLoaderDataGraph_lock , PaddedMutex , safepoint); if (WhiteBoxAPI) { MUTEX_DEFN(Compilation_lock , PaddedMonitor, nosafepoint); @@ -334,7 +335,6 @@ void mutex_init() { MUTEX_DEFL(PerfDataMemAlloc_lock , PaddedMutex , Heap_lock); MUTEX_DEFL(PerfDataManager_lock , PaddedMutex , Heap_lock); - MUTEX_DEFL(ClassLoaderDataGraph_lock , PaddedMutex , MultiArray_lock); MUTEX_DEFL(VMOperation_lock , PaddedMonitor, Heap_lock, true); MUTEX_DEFL(ClassInitError_lock , PaddedMonitor, Threads_lock); @@ -357,6 +357,9 @@ void mutex_init() { // JVMCIRuntime_lock must be acquired before JVMCI_lock to avoid deadlock 
MUTEX_DEFL(JVMCI_lock , PaddedMonitor, JVMCIRuntime_lock); #endif + + // Allocate RecursiveMutex + MultiArray_lock = new RecursiveMutex(); } #undef MUTEX_DEFL diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index 840d01e62fa..7729189e18a 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,6 @@ extern Monitor* Compilation_lock; // a lock used to pause compila extern Mutex* CompileTaskAlloc_lock; // a lock held when CompileTasks are allocated extern Mutex* CompileStatistics_lock; // a lock held when updating compilation statistics extern Mutex* DirectivesStack_lock; // a lock held when mutating the dirstack and ref counting directives -extern Mutex* MultiArray_lock; // a lock used to guard allocation of multi-dim arrays extern Monitor* Terminator_lock; // a lock used to guard termination of the vm extern Monitor* InitCompleted_lock; // a lock used to signal threads waiting on init completed extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks @@ -332,4 +331,21 @@ class MutexUnlocker: StackObj { } }; +// Instance of a RecursiveMutex that may be held through Java heap allocation, which may include calls to Java, +// and JNI event notification for resource exhaustion for metaspace or heap. +extern RecursiveMutex* MultiArray_lock; + +// RAII locker for a RecursiveMutex. See comments in mutex.hpp for more information.
+class RecursiveLocker { + RecursiveMutex* _lock; + Thread* _thread; + public: + RecursiveLocker(RecursiveMutex* lock, Thread* current) : _lock(lock), _thread(current) { + _lock->lock(_thread); + } + ~RecursiveLocker() { + _lock->unlock(_thread); + } +}; + #endif // SHARE_RUNTIME_MUTEXLOCKER_HPP