8268406: Deallocate jmethodID native memory

Reviewed-by: dholmes, sspitsyn, dcubed, eosterlund, aboldtch
This commit is contained in:
Coleen Phillimore 2025-06-27 11:20:49 +00:00
parent aa26cede63
commit d8f9b188fa
16 changed files with 458 additions and 297 deletions

View File

@ -65,6 +65,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/jmethodIDTable.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
@ -578,6 +579,33 @@ void ClassLoaderData::remove_class(Klass* scratch_class) {
ShouldNotReachHere(); // should have found this class!!
}
// Record a jmethodID created for a method of a class owned by this loader,
// so remove_jmethod_ids() can release the table entries when the loader dies.
void ClassLoaderData::add_jmethod_id(jmethodID mid) {
  // metaspace_lock guards lazy creation of, and pushes onto, the array.
  MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  if (_jmethod_ids == nullptr) {
    // Lazily allocated; initial capacity 32, C-heap backed (mtClass).
    _jmethod_ids = new (mtClass) GrowableArray<jmethodID>(32, mtClass);
  }
  _jmethod_ids->push(mid);
}
// Method::remove_jmethod_ids removes jmethodID entries from the table which
// releases memory.
// Because native code (e.g., JVMTI agent) holding jmethod_ids may access them
// after the associated classes and class loader are unloaded, subsequent lookups
// for these ids will return null since they are no longer found in the table.
// The Java Native Interface Specification says "method ID
// does not prevent the VM from unloading the class from which the ID has
// been derived. After the class is unloaded, the method or field ID becomes
// invalid".
void ClassLoaderData::remove_jmethod_ids() {
MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
for (int i = 0; i < _jmethod_ids->length(); i++) {
jmethodID mid = _jmethod_ids->at(i);
JmethodIDTable::remove(mid);
}
delete _jmethod_ids;
_jmethod_ids = nullptr;
}
void ClassLoaderData::unload() {
_unloading = true;
@ -599,19 +627,8 @@ void ClassLoaderData::unload() {
// after erroneous classes are released.
classes_do(InstanceKlass::unload_class);
// Method::clear_jmethod_ids only sets the jmethod_ids to null without
// releasing the memory for related JNIMethodBlocks and JNIMethodBlockNodes.
// This is done intentionally because native code (e.g. JVMTI agent) holding
// jmethod_ids may access them after the associated classes and class loader
// are unloaded. The Java Native Interface Specification says "method ID
// does not prevent the VM from unloading the class from which the ID has
// been derived. After the class is unloaded, the method or field ID becomes
// invalid". In real world usages, the native code may rely on jmethod_ids
// being null after class unloading. Hence, it is unsafe to free the memory
// from the VM side without knowing when native code is going to stop using
// them.
if (_jmethod_ids != nullptr) {
Method::clear_jmethod_ids(this);
remove_jmethod_ids();
}
}
@ -1037,9 +1054,7 @@ void ClassLoaderData::print_on(outputStream* out) const {
out->print_cr(" - dictionary " INTPTR_FORMAT, p2i(_dictionary));
}
if (_jmethod_ids != nullptr) {
out->print (" - jmethod count ");
Method::print_jmethod_ids_count(this, out);
out->print_cr("");
out->print_cr(" - jmethod count %d", _jmethod_ids->length());
}
out->print_cr(" - deallocate list " INTPTR_FORMAT, p2i(_deallocate_list));
out->print_cr(" - next CLD " INTPTR_FORMAT, p2i(_next));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,6 @@
// and provides iterators for root tracing and other GC operations.
class ClassLoaderDataGraph;
class JNIMethodBlock;
class ModuleEntry;
class PackageEntry;
class ModuleEntryTable;
@ -143,10 +142,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
ModuleEntry* _unnamed_module; // This class loader's unnamed module.
Dictionary* _dictionary; // The loaded InstanceKlasses, including initiated by this class loader
// These method IDs are created for the class loader and set to null when the
// class loader is unloaded. They are rarely freed, only for redefine classes
// and if they lose a data race in InstanceKlass.
JNIMethodBlock* _jmethod_ids;
// These method IDs are created for the class loader and removed when the
// class loader is unloaded.
GrowableArray<jmethodID>* _jmethod_ids;
// Metadata to be deallocated when it's safe at class unloading, when
// this class loader isn't unloaded itself.
@ -316,8 +314,9 @@ private:
void classes_do(KlassClosure* klass_closure);
Klass* klasses() { return _klasses; }
JNIMethodBlock* jmethod_ids() const { return _jmethod_ids; }
void set_jmethod_ids(JNIMethodBlock* new_block) { _jmethod_ids = new_block; }
void add_jmethod_id(jmethodID id);
void remove_jmethod_ids();
GrowableArray<jmethodID>* jmethod_ids() const { return _jmethod_ids; }
void print() const;
void print_on(outputStream* out) const PRODUCT_RETURN;

View File

@ -60,6 +60,7 @@
#include "oops/compressedOops.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/jmethodIDTable.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objLayout.hpp"
@ -436,6 +437,9 @@ void Universe::genesis(TRAPS) {
vmSymbols::initialize();
// Initialize table for matching jmethodID, before SystemDictionary.
JmethodIDTable::initialize();
SystemDictionary::initialize(CHECK);
// Create string constants
@ -444,7 +448,6 @@ void Universe::genesis(TRAPS) {
s = StringTable::intern("-2147483648", CHECK);
_the_min_jint_string = OopHandle(vm_global(), s);
#if INCLUDE_CDS
if (CDSConfig::is_using_archive()) {
// Verify shared interfaces array.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,6 +58,7 @@
f(mtMetaspace, "Metaspace") \
f(mtStringDedup, "String Deduplication") \
f(mtObjectMonitor, "Object Monitors") \
f(mtJNI, "JNI") \
f(mtNone, "Unknown") \
//end

View File

@ -2395,13 +2395,26 @@ jmethodID InstanceKlass::update_jmethod_id(jmethodID* jmeths, Method* method, in
return new_id;
}
// Allocate the jmethodID cache.
// Element [0] records the cache size; the entry for method idnum i lives
// at [i + 1], and all entries start out null.
static jmethodID* create_jmethod_id_cache(size_t size) {
  const size_t num_elements = size + 1;
  jmethodID* cache = NEW_C_HEAP_ARRAY(jmethodID, num_elements, mtClass);
  memset(cache, 0, num_elements * sizeof(jmethodID));
  cache[0] = (jmethodID)size;
  return cache;
}
// When reading outside a lock, use this.
// The acquire pairs with the release in release_set_methods_jmethod_ids()
// so readers see a fully initialized cache.
jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
  return Atomic::load_acquire(&_methods_jmethod_ids);
}
// Publish the jmethodID cache; the release pairs with the acquire in
// methods_jmethod_ids_acquire() above.
void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
  Atomic::release_store(&_methods_jmethod_ids, jmeths);
}
// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
Method* method = method_h();
jmethodID InstanceKlass::get_jmethod_id(Method* method) {
int idnum = method->method_idnum();
jmethodID* jmeths = methods_jmethod_ids_acquire();
@ -2422,15 +2435,12 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
if (jmeths == nullptr) {
MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
jmeths = methods_jmethod_ids_acquire();
jmeths = _methods_jmethod_ids;
// Still null?
if (jmeths == nullptr) {
size_t size = idnum_allocated_count();
assert(size > (size_t)idnum, "should already have space");
jmeths = NEW_C_HEAP_ARRAY(jmethodID, size + 1, mtClass);
memset(jmeths, 0, (size + 1) * sizeof(jmethodID));
// cache size is stored in element[0], other elements offset by one
jmeths[0] = (jmethodID)size;
jmeths = create_jmethod_id_cache(size);
jmethodID new_id = update_jmethod_id(jmeths, method, idnum);
// publish jmeths
@ -2460,10 +2470,7 @@ void InstanceKlass::update_methods_jmethod_cache() {
if (old_size < size + 1) {
// Allocate a larger one and copy entries to the new one.
// They've already been updated to point to new methods where applicable (i.e., not obsolete).
jmethodID* new_cache = NEW_C_HEAP_ARRAY(jmethodID, size + 1, mtClass);
memset(new_cache, 0, (size + 1) * sizeof(jmethodID));
// The cache size is stored in element[0]; the other elements are offset by one.
new_cache[0] = (jmethodID)size;
jmethodID* new_cache = create_jmethod_id_cache(size);
for (int i = 1; i <= (int)old_size; i++) {
new_cache[i] = cache[i];
@ -2474,23 +2481,29 @@ void InstanceKlass::update_methods_jmethod_cache() {
}
}
// Figure out how many jmethodIDs haven't been allocated, and make
// sure space for them is pre-allocated. This makes getting all
// method ids much, much faster with classes with more than 8
// Make a jmethodID for all methods in this class. This makes getting all method
// ids much, much faster with classes with more than 8
// methods, and has a *substantial* effect on performance with jvmti
// code that loads all jmethodIDs for all classes.
void InstanceKlass::ensure_space_for_methodids(int start_offset) {
int new_jmeths = 0;
int length = methods()->length();
for (int index = start_offset; index < length; index++) {
Method* m = methods()->at(index);
jmethodID id = m->find_jmethod_id_or_null();
if (id == nullptr) {
new_jmeths++;
}
// Create a jmethodID for every (non-overpass) method in this class, filling
// the per-class cache in one pass under the creation lock.
void InstanceKlass::make_methods_jmethod_ids() {
  MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
  // Plain load is safe here: we hold the lock that guards creation.
  jmethodID* jmeths = _methods_jmethod_ids;
  if (jmeths == nullptr) {
    jmeths = create_jmethod_id_cache(idnum_allocated_count());
    // Publish the (still mostly empty) cache for lock-free readers.
    release_set_methods_jmethod_ids(jmeths);
  }
  int length = methods()->length();
  for (int index = 0; index < length; index++) {
    Method* m = methods()->at(index);
    int idnum = m->method_idnum();
    assert(!m->is_old(), "should not have old methods or I'm confused");
    // Entry [idnum + 1] because element [0] holds the cache size.
    jmethodID id = Atomic::load_acquire(&jmeths[idnum + 1]);
    if (!m->is_overpass() && // skip overpasses
        id == nullptr) {
      id = Method::make_jmethod_id(class_loader_data(), m);
      // Release-store so concurrent lock-free readers see a valid id.
      Atomic::release_store(&jmeths[idnum + 1], id);
    }
  }
}
@ -2923,7 +2936,7 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) {
JNIid::deallocate(jni_ids());
set_jni_ids(nullptr);
jmethodID* jmeths = methods_jmethod_ids_acquire();
jmethodID* jmeths = _methods_jmethod_ids;
if (jmeths != nullptr) {
release_set_methods_jmethod_ids(nullptr);
FreeHeap(jmeths);
@ -4275,17 +4288,14 @@ bool InstanceKlass::should_clean_previous_versions_and_reset() {
return ret;
}
// This nulls out jmethodIDs for all methods in 'klass'
// It needs to be called explicitly for all previous versions of a class because these may not be cleaned up
// during class unloading.
// We can not use the jmethodID cache associated with klass directly because the 'previous' versions
// do not have the jmethodID cache filled in. Instead, we need to lookup jmethodID for each method and this
// is expensive - O(n) for one jmethodID lookup. For all contained methods it is O(n^2).
// The reason for expensive jmethodID lookup for each method is that there is no direct link between method and jmethodID.
void InstanceKlass::clear_jmethod_ids(InstanceKlass* klass) {
// This nulls out the jmethodID for all obsolete methods in the previous version of the 'klass'.
// These obsolete methods only exist in the previous version and we're about to delete the memory for them.
// The jmethodID for these are deallocated when we unload the class, so this doesn't remove them from the table.
void InstanceKlass::clear_obsolete_jmethod_ids(InstanceKlass* klass) {
Array<Method*>* method_refs = klass->methods();
for (int k = 0; k < method_refs->length(); k++) {
Method* method = method_refs->at(k);
// Only need to clear obsolete methods.
if (method != nullptr && method->is_obsolete()) {
method->clear_jmethod_id();
}
@ -4335,7 +4345,7 @@ void InstanceKlass::purge_previous_version_list() {
// Unlink from previous version list.
assert(pv_node->class_loader_data() == loader_data, "wrong loader_data");
InstanceKlass* next = pv_node->previous_versions();
clear_jmethod_ids(pv_node); // jmethodID maintenance for the unloaded class
clear_obsolete_jmethod_ids(pv_node); // jmethodID maintenance for the unloaded class
pv_node->link_previous_versions(nullptr); // point next to null
last->link_previous_versions(next);
// Delete this node directly. Nothing is referring to it and we don't

View File

@ -781,8 +781,8 @@ public:
u2 method_index);
// jmethodID support
jmethodID get_jmethod_id(const methodHandle& method_h);
void ensure_space_for_methodids(int start_offset = 0);
jmethodID get_jmethod_id(Method* method);
void make_methods_jmethod_ids();
jmethodID jmethod_id_or_null(Method* method);
void update_methods_jmethod_cache();
@ -1056,10 +1056,10 @@ private:
Atomic::store(&_init_thread, thread);
}
inline jmethodID* methods_jmethod_ids_acquire() const;
inline void release_set_methods_jmethod_ids(jmethodID* jmeths);
// This nulls out jmethodIDs for all methods in 'klass'
static void clear_jmethod_ids(InstanceKlass* klass);
jmethodID* methods_jmethod_ids_acquire() const;
void release_set_methods_jmethod_ids(jmethodID* jmeths);
// This nulls out obsolete jmethodIDs for all methods in 'klass'.
static void clear_obsolete_jmethod_ids(InstanceKlass* klass);
jmethodID update_jmethod_id(jmethodID* jmeths, Method* method, int idnum);
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,14 +71,6 @@ inline void InstanceKlass::release_set_array_klasses(ObjArrayKlass* k) {
Atomic::release_store(&_array_klasses, k);
}
inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
return Atomic::load_acquire(&_methods_jmethod_ids);
}
inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
Atomic::release_store(&_methods_jmethod_ids, jmeths);
}
// The iteration over the oops in objects is a hot path in the GC code.
// By force inlining the following functions, we get similar GC performance
// as the previous macro based implementation.

View File

@ -0,0 +1,194 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/jmethodIDTable.hpp"
#include "oops/method.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/macros.hpp"
// Save (jmethod, Method*) in a hashtable to lookup Method.
// The CHT is for performance because it has lock free lookup.
// The value of the next jmethodID. This only increments (always unique IDs).
static uint64_t _jmethodID_counter = 0;
// Tracks the number of jmethodID entries in the _jmethod_id_table.
// Incremented on insert, decremented on remove. Used to track if we need to resize the table.
static uint64_t _jmethodID_entry_count = 0;
// Current number of live entries (inserts minus removes); used for resize decisions.
uint64_t JmethodIDTable::get_entry_count() { return _jmethodID_entry_count; }
// Table value: pairs the 64-bit jmethodID value with the Method* it resolves to.
class JmethodEntry {
 public:
  uint64_t _id;      // the jmethodID bits, also used as the hash
  Method*  _method;  // current Method; updated on RedefineClasses, nulled when cleared

  JmethodEntry(uint64_t id, Method* method) : _id(id), _method(method) {}
};
// ConcurrentHashTable configuration: node allocation, hashing and liveness.
class JmethodIDTableConfig : public AllStatic {
 public:
  typedef JmethodEntry Value;
  // Nodes live on the C heap, accounted to mtJNI.
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtJNI);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
  // The id itself is the hash: ids are assigned from a monotonically
  // increasing counter, which distributes well across buckets.
  static uintx get_hash(Value const& value, bool* is_dead) {
    *is_dead = false;
    return value._id;
  }
  // Entries are only removed explicitly (see JmethodIDTable::remove),
  // never concurrently swept, so nothing is ever "dead".
  static bool is_dead(Value const& value) {
    return false;
  }
};
using MethodIdTable = ConcurrentHashTable<JmethodIDTableConfig, mtJNI>;
static MethodIdTable* _jmethod_id_table = nullptr;
// Create the global id -> Method table. Called once during VM startup,
// before the SystemDictionary is initialized.
void JmethodIDTable::initialize() {
  const size_t start_size = 10;  // log2: initial size is 2^10 buckets
  // 2^24 is max size
  const size_t end_size = 24;
  // If a chain gets to 32 something might be wrong.
  const size_t grow_hint = 32;

  _jmethod_id_table = new MethodIdTable(start_size, end_size, grow_hint);
}
// Lookup functor for the ConcurrentHashTable: matches entries by id value.
class JmethodIDLookup : StackObj {
 private:
  uint64_t _mid;  // the jmethodID bits being searched for

 public:
  JmethodIDLookup(const uint64_t mid) : _mid(mid) {}
  // Must agree with JmethodIDTableConfig::get_hash: the id is the hash.
  uintx get_hash() const {
    return _mid;
  }
  bool equals(JmethodEntry* value) {
    return _mid == value->_id;
  }
  // Entries are removed explicitly, never found dead during lookup.
  bool is_dead(JmethodEntry* value) {
    return false;
  }
};
// Look up the table entry for mid. Returns null when the id is stale,
// i.e. its entry was removed because the owning class loader was unloaded.
// Lock-free: relies on the ConcurrentHashTable's concurrent get.
static JmethodEntry* get_jmethod_entry(jmethodID mid) {
  assert(mid != nullptr, "JNI method id should not be null");
  Thread* current = Thread::current();
  JmethodIDLookup lookup((uint64_t)mid);
  JmethodEntry* result = nullptr;
  auto get = [&] (JmethodEntry* value) {
    // function called if value is found so is never null
    result = value;
  };
  bool needs_rehashing = false;
  _jmethod_id_table->get(current, lookup, get, &needs_rehashing);
  // Ids come from a monotonic counter, so the hash distribution never degrades.
  assert(!needs_rehashing, "should never need rehashing");
  return result;
}
// Map a jmethodID back to its Method. Returns null for stale ids whose
// entries have been removed from the table (e.g. after class unloading).
Method* JmethodIDTable::resolve_jmethod_id(jmethodID mid) {
  JmethodEntry* entry = get_jmethod_entry(mid);
  if (entry == nullptr) {
    return nullptr;
  }
  return entry->_method;
}
const unsigned _resize_load_trigger = 5; // load factor that will trigger the resize
// Current bucket count of the table (the table stores its size as log2).
static unsigned table_size(Thread* current) {
  return 1 << _jmethod_id_table->get_size_log2(current);
}
// Grow once the average load per bucket exceeds _resize_load_trigger,
// unless the table has already reached its maximum size.
static bool needs_resize(Thread* current) {
  if (_jmethod_id_table->is_max_size_reached()) {
    return false;
  }
  return _jmethodID_entry_count > (_resize_load_trigger * table_size(current));
}
// Add a method id to the jmethod_ids.
// Ids are taken from a strictly increasing 64-bit counter, so an id is never
// reused; a stale id can therefore never alias a newer method.
jmethodID JmethodIDTable::make_jmethod_id(Method* m) {
  bool grow_hint, clean_hint;  // out-params of insert(); grow is handled below
  assert_locked_or_safepoint(JmethodIdCreation_lock);
  // Update jmethodID global counter.
  _jmethodID_counter++;

  JmethodEntry new_entry(_jmethodID_counter, m);
  Thread* current = Thread::current();
  JmethodIDLookup lookup(_jmethodID_counter);
  // Insert cannot collide: the counter value has never been used before.
  bool created = _jmethod_id_table->insert(current, lookup, new_entry, &grow_hint, &clean_hint);
  assert(created, "must be");
  log_debug(jmethod)("Inserted jmethod id " UINT64_FORMAT_X, _jmethodID_counter);
  // Increment number of entries in the table.
  _jmethodID_entry_count++;

  // Resize table if it needs to grow. The _jmethod_id_table has a good distribution.
  if (needs_resize(current)) {
    _jmethod_id_table->grow(current);
    log_info(jmethod)("Growing table to %d for " UINT64_FORMAT " entries", table_size(current), _jmethodID_counter);
  }
  // The jmethodID handed to native code is just the counter value.
  return (jmethodID)_jmethodID_counter;
}
// Remove the entry for jmid from the table, freeing its native memory.
// Called at class unloading for every id the dying loader created; the id
// must be present. Subsequent lookups of jmid will return null.
void JmethodIDTable::remove(jmethodID jmid) {
  // Lock also guards the non-atomic _jmethodID_entry_count update below.
  assert_locked_or_safepoint(JmethodIdCreation_lock);
  JmethodIDLookup lookup((uint64_t)jmid);
  Thread* current = Thread::current();
  // Initialize to null: the callback only runs when the entry is found,
  // so an uninitialized pointer would otherwise be left indeterminate.
  JmethodEntry* result = nullptr;
  auto get = [&] (JmethodEntry* value) {
    // The function that is called if value is found, so is never null.
    result = value;
  };
  bool removed = _jmethod_id_table->remove(current, lookup, get);
  assert(removed, "must be");
  log_debug(jmethod)("Removed jmethod id " UINT64_FORMAT_X, (uint64_t)jmid);
  // Decrement number of entries in the table.
  _jmethodID_entry_count--;
}
// RedefineClasses support: repoint jmid at new_method so jmethodIDs already
// held by native code resolve to the current version of the method.
void JmethodIDTable::change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method) {
  assert_locked_or_safepoint(JmethodIdCreation_lock);
  JmethodEntry* result = get_jmethod_entry(jmid);
  // Guard the dereference: callers pass ids of live (not yet unloaded) classes.
  assert(result != nullptr, "jmethodID must be present in the table");
  // Change table entry to point to the new method.
  log_debug(jmethod)("Changed jmethod id " UINT64_FORMAT_X " from " PTR_FORMAT " to " PTR_FORMAT, (uint64_t)jmid,
                     p2i(result->_method), p2i(new_method));
  result->_method = new_method;
}
// Null out the Method for jmid if it still refers to obsolete_method.
// The entry itself stays in the table; it is deallocated at class unloading.
void JmethodIDTable::clear_jmethod_id(jmethodID jmid, Method* obsolete_method) {
  assert_locked_or_safepoint(JmethodIdCreation_lock);
  JmethodEntry* result = get_jmethod_entry(jmid);
  // Guard the dereference: callers obtained jmid from a live lookup.
  assert(result != nullptr, "jmethodID must be present in the table");
  // We need to make sure that jmethodID actually resolves to this method
  // - multiple redefined versions may share jmethodID slots and if a method
  // has already been rewired to a newer version we could be clearing
  // the reference to a still existing method instance.
  if (result->_method == obsolete_method) {
    result->_method = nullptr;
  }
}

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_OOPS_JMETHODIDTABLE_HPP
#define SHARE_OOPS_JMETHODIDTABLE_HPP
#include "jni.h"
#include "memory/allocation.hpp"
// Class for associating Method with jmethodID
class Method;
// Global table associating jmethodIDs with Methods (backed by a concurrent
// hash table with lock-free lookup). All-static: there is one table per VM.
class JmethodIDTable : public AllStatic {
 public:
  // Create the table; must run before the SystemDictionary is initialized.
  static void initialize();

  // Given a Method return a jmethodID.
  static jmethodID make_jmethod_id(Method* m);

  // Given a jmethodID, return a Method.
  static Method* resolve_jmethod_id(jmethodID mid);

  // Class unloading support, remove the associations from the tables. Stale jmethodID will
  // not be found and return null.
  static void remove(jmethodID mid);

  // RedefineClasses support
  static void change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method);
  // Null the Method for jmid if it still points at m (obsolete method cleanup).
  static void clear_jmethod_id(jmethodID jmid, Method* m);
  // Number of live entries; for diagnostics and resize decisions.
  static uint64_t get_entry_count();
};
#endif // SHARE_OOPS_JMETHODIDTABLE_HPP

View File

@ -53,6 +53,7 @@
#include "nmt/memTracker.hpp"
#include "oops/constMethod.hpp"
#include "oops/constantPool.hpp"
#include "oops/jmethodIDTable.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
@ -2059,172 +2060,33 @@ void BreakpointInfo::clear(Method* method) {
#endif // INCLUDE_JVMTI
// jmethodID handling
// This is a block allocating object, sort of like JNIHandleBlock, only a
// lot simpler.
// It's allocated on the CHeap because once we allocate a jmethodID, we can
// never get rid of it.
static const int min_block_size = 8;
// One node of a JNIMethodBlock: a C-heap array of Method* slots plus a link
// to the next node. Slots are handed out in order via _top.
class JNIMethodBlockNode : public CHeapObj<mtClass> {
  friend class JNIMethodBlock;

  Method**            _methods;            // slot array; entries may be _free_method
  int                 _number_of_methods;  // capacity of _methods
  int                 _top;                // index of the next unused slot
  JNIMethodBlockNode* _next;               // next node in the chain, or null

 public:
  JNIMethodBlockNode(int num_methods = min_block_size);

  ~JNIMethodBlockNode() { FREE_C_HEAP_ARRAY(Method*, _methods); }

  // Make sure at least num_addl_methods free slots exist in this node or
  // its successors, appending a new node if necessary.
  void ensure_methods(int num_addl_methods) {
    if (_top < _number_of_methods) {
      // Credit the free slots still available in this node.
      num_addl_methods -= _number_of_methods - _top;
      if (num_addl_methods <= 0) {
        return;
      }
    }
    if (_next == nullptr) {
      _next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size));
    } else {
      _next->ensure_methods(num_addl_methods);
    }
  }
};
// A block-allocating store of Method* slots whose addresses serve as
// jmethodIDs. Allocated on the C heap because once handed out, a slot
// address can never be reclaimed (native code may hold it indefinitely).
class JNIMethodBlock : public CHeapObj<mtClass> {
  JNIMethodBlockNode  _head;       // first node, allocated inline
  JNIMethodBlockNode* _last_free;  // node to start the free-slot search from

 public:
  static Method* const _free_method;

  JNIMethodBlock(int initial_capacity = min_block_size)
      : _head(initial_capacity), _last_free(&_head) {}

  void ensure_methods(int num_addl_methods) {
    _last_free->ensure_methods(num_addl_methods);
  }

  // Store m in a free slot and return the slot's address (the jmethodID).
  Method** add_method(Method* m) {
    for (JNIMethodBlockNode* b = _last_free; b != nullptr; b = b->_next) {
      if (b->_top < b->_number_of_methods) {
        // top points to the next free entry.
        int i = b->_top;
        b->_methods[i] = m;
        b->_top++;
        _last_free = b;
        return &(b->_methods[i]);
      } else if (b->_top == b->_number_of_methods) {
        // if the next free entry ran off the block see if there's a free entry
        for (int i = 0; i < b->_number_of_methods; i++) {
          if (b->_methods[i] == _free_method) {
            b->_methods[i] = m;
            _last_free = b;
            return &(b->_methods[i]);
          }
        }
        // Only check each block once for frees. They're very unlikely.
        // Increment top past the end of the block.
        b->_top++;
      }
      // need to allocate a next block.
      if (b->_next == nullptr) {
        b->_next = _last_free = new JNIMethodBlockNode();
      }
    }
    guarantee(false, "Should always allocate a free block");
    return nullptr;
  }

  // True if m points into one of this block's slot arrays.
  bool contains(Method** m) {
    if (m == nullptr) return false;
    for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
      if (b->_methods <= m && m < b->_methods + b->_number_of_methods) {
        // This is a bit of extra checking, for two reasons. One is
        // that contains() deals with pointers that are passed in by
        // JNI code, so making sure that the pointer is aligned
        // correctly is valuable. The other is that <= and > are
        // technically not defined on pointers, so the if guard can
        // pass spuriously; no modern compiler is likely to make that
        // a problem, though (and if one did, the guard could also
        // fail spuriously, which would be bad).
        ptrdiff_t idx = m - b->_methods;
        if (b->_methods + idx == m) {
          return true;
        }
      }
    }
    return false;  // not found
  }

  // During class unloading the methods are cleared, which is different
  // than freed.
  void clear_all_methods() {
    for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
      for (int i = 0; i< b->_number_of_methods; i++) {
        b->_methods[i] = nullptr;
      }
    }
  }

#ifndef PRODUCT
  // Debug-only: number of slots holding a real Method (not _free_method).
  int count_methods() {
    // count all allocated methods
    int count = 0;
    for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
      for (int i = 0; i< b->_number_of_methods; i++) {
        if (b->_methods[i] != _free_method) count++;
      }
    }
    return count;
  }
#endif // PRODUCT
};
// Something that can't be mistaken for an address or a markWord
Method* const JNIMethodBlock::_free_method = (Method*)55;

// Allocate the slot array (at least min_block_size entries) and mark every
// slot free.
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(nullptr) {
  _number_of_methods = MAX2(num_methods, min_block_size);
  _methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal);
  for (int i = 0; i < _number_of_methods; i++) {
    _methods[i] = JNIMethodBlock::_free_method;
  }
}
// Pre-allocate capacity for jmethodIDs in cld's block so later id creation
// does not have to grow it one slot at a time.
void Method::ensure_jmethod_ids(ClassLoaderData* cld, int capacity) {
  // Have to add jmethod_ids() to class loader data thread-safely.
  // Also have to add the method to the list safely, which the lock
  // protects as well.
  MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
  if (cld->jmethod_ids() == nullptr) {
    cld->set_jmethod_ids(new JNIMethodBlock(capacity));
  } else {
    cld->jmethod_ids()->ensure_methods(capacity);
  }
}
// jmethodIDs are 64-bit integers that will never run out and are mapped in a table
// to their Method and vice versa. If JNI code has access to stale jmethodID, this
// wastes no memory but the Method* returned is null.
// Add a method id to the jmethod_ids
jmethodID Method::make_jmethod_id(ClassLoaderData* cld, Method* m) {
// Have to add jmethod_ids() to class loader data thread-safely.
// Also have to add the method to the list safely, which the lock
// Also have to add the method to the InstanceKlass list safely, which the lock
// protects as well.
assert(JmethodIdCreation_lock->owned_by_self(), "sanity check");
jmethodID jmid = JmethodIDTable::make_jmethod_id(m);
assert(jmid != nullptr, "must be created");
ResourceMark rm;
log_debug(jmethod)("Creating jmethodID for Method %s", m->external_name());
if (cld->jmethod_ids() == nullptr) {
cld->set_jmethod_ids(new JNIMethodBlock());
}
// jmethodID is a pointer to Method*
return (jmethodID)cld->jmethod_ids()->add_method(m);
// Add to growable array in CLD.
cld->add_jmethod_id(jmid);
return jmid;
}
// This looks in the InstanceKlass cache, then calls back to make_jmethod_id if not found.
jmethodID Method::jmethod_id() {
methodHandle mh(Thread::current(), this);
return method_holder()->get_jmethod_id(mh);
return method_holder()->get_jmethod_id(this);
}
// Get the Method out of the table given the method id.
// Returns null for a stale id whose table entry was removed at class unloading.
Method* Method::resolve_jmethod_id(jmethodID mid) {
  assert(mid != nullptr, "JNI method id should not be null");
  return JmethodIDTable::resolve_jmethod_id(mid);
}
void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method) {
@ -2232,25 +2094,34 @@ void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* ne
// scratch method holder.
assert(resolve_jmethod_id(jmid)->method_holder()->class_loader()
== new_method->method_holder()->class_loader() ||
new_method->method_holder()->class_loader() == nullptr, // allow Unsafe substitution
new_method->method_holder()->class_loader() == nullptr, // allow substitution to Unsafe method
"changing to a different class loader");
// Just change the method in place, jmethodID pointer doesn't change.
*((Method**)jmid) = new_method;
JmethodIDTable::change_method_associated_with_jmethod_id(jmid, new_method);
}
bool Method::is_method_id(jmethodID mid) {
// If there's a jmethodID for this method, clear the Method
// but leave jmethodID for this method in the table.
// It's deallocated with class unloading.
void Method::clear_jmethod_id() {
  // The jmethodID is not stored in the Method; look it up via the holder's cache.
  jmethodID mid = method_holder()->jmethod_id_or_null(this);
  if (mid != nullptr) {
    JmethodIDTable::clear_jmethod_id(mid, this);
  }
}
bool Method::validate_jmethod_id(jmethodID mid) {
Method* m = resolve_jmethod_id(mid);
assert(m != nullptr, "should be called with non-null method");
InstanceKlass* ik = m->method_holder();
ClassLoaderData* cld = ik->class_loader_data();
if (cld->jmethod_ids() == nullptr) return false;
return (cld->jmethod_ids()->contains((Method**)mid));
return (cld->jmethod_ids()->contains(mid));
}
Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
if (mid == nullptr) return nullptr;
Method* o = resolve_jmethod_id(mid);
if (o == nullptr || o == JNIMethodBlock::_free_method) {
if (o == nullptr) {
return nullptr;
}
// Method should otherwise be valid. Assert for testing.
@ -2259,7 +2130,7 @@ Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
// unloaded, we need to return null here too because after a safepoint, its memory
// will be reclaimed.
return o->method_holder()->is_loader_alive() ? o : nullptr;
};
}
void Method::set_on_stack(const bool value) {
// Set both the method itself and its constant pool. The constant pool
@ -2280,25 +2151,6 @@ void Method::record_gc_epoch() {
constants()->cache()->record_gc_epoch();
}
// Called when the class loader is unloaded to make all methods weak.
// Nulls every Method* slot in the loader's JNIMethodBlock; the block memory
// itself is intentionally not freed, since native code may still probe the ids.
void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
  loader_data->jmethod_ids()->clear_all_methods();
}
// Null out this method's jmethodID slot (if any) without freeing the slot.
void Method::clear_jmethod_id() {
  // Being at a safepoint prevents racing against other class redefinitions
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  // The jmethodID is not stored in the Method instance, we need to look it up first
  jmethodID methodid = find_jmethod_id_or_null();
  // We need to make sure that jmethodID actually resolves to this method
  // - multiple redefined versions may share jmethodID slots and if a method
  // has already been rewired to a newer version we could be removing reference
  // to a still existing method instance
  if (methodid != nullptr && *((Method**)methodid) == this) {
    *((Method**)methodid) = nullptr;
  }
}
bool Method::has_method_vptr(const void* ptr) {
Method m;
// This assumes that the vtbl pointer is the first word of a C++ object.
@ -2323,13 +2175,6 @@ bool Method::is_valid_method(const Method* m) {
}
}
#ifndef PRODUCT
// Debug-only aid: prints to 'out' the number of live jmethodIDs tracked by
// 'loader_data' (count_methods() presumably skips cleared slots — confirm).
void Method::print_jmethod_ids_count(const ClassLoaderData* loader_data, outputStream* out) {
  out->print("%d", loader_data->jmethod_ids()->count_methods());
}
#endif // PRODUCT
// Printing
#ifndef PRODUCT

View File

@ -704,18 +704,10 @@ public:
// refers to null (as is the case for any weak reference).
static jmethodID make_jmethod_id(ClassLoaderData* cld, Method* mh);
// Ensure there is enough capacity in the internal tracking data
// structures to hold the number of jmethodIDs you plan to generate.
// This saves substantial time doing allocations.
static void ensure_jmethod_ids(ClassLoaderData* cld, int capacity);
// Use resolve_jmethod_id() in situations where the caller is expected
// to provide a valid jmethodID; the only sanity checks are in asserts;
// result guaranteed not to be null.
inline static Method* resolve_jmethod_id(jmethodID mid) {
  assert(mid != nullptr, "JNI method id should not be null");
  // In this (old) scheme a jmethodID is an opaque pointer to a Method* slot;
  // dereference it to obtain the current Method (may be null if cleared).
  return *((Method**)mid);
}
static Method* resolve_jmethod_id(jmethodID mid);
// Use checked_resolve_jmethod_id() in situations where the caller
// should provide a valid jmethodID, but might not. Null is returned
@ -723,10 +715,9 @@ public:
static Method* checked_resolve_jmethod_id(jmethodID mid);
static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
static bool is_method_id(jmethodID mid);
static bool validate_jmethod_id(jmethodID mid);
// Clear methods
static void clear_jmethod_ids(ClassLoaderData* loader_data);
// Clear jmethodID
void clear_jmethod_id();
static void print_jmethod_ids_count(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

View File

@ -439,7 +439,7 @@ Method* jniCheck::validate_jmethod_id(JavaThread* thr, jmethodID method_id) {
}
// jmethodIDs are handles in the class loader data,
// but that can be expensive so check it last
else if (!Method::is_method_id(method_id)) {
else if (!Method::validate_jmethod_id(method_id)) {
ReportJNIFatalError(thr, fatal_non_weak_method);
}
return m;

View File

@ -2768,9 +2768,11 @@ JvmtiEnv::GetClassMethods(oop k_mirror, jint* method_count_ptr, jmethodID** meth
int result_length = ik->methods()->length();
jmethodID* result_list = (jmethodID*)jvmtiMalloc(result_length * sizeof(jmethodID));
int index;
bool jmethodids_found = true;
int skipped = 0; // skip overpass methods
// Make jmethodIDs for all non-overpass methods.
ik->make_methods_jmethod_ids();
for (index = 0; index < result_length; index++) {
Method* m = ik->methods()->at(index);
// Depending on can_maintain_original_method_order capability use the original
@ -2783,20 +2785,8 @@ JvmtiEnv::GetClassMethods(oop k_mirror, jint* method_count_ptr, jmethodID** meth
skipped++;
continue;
}
jmethodID id;
if (jmethodids_found) {
id = m->find_jmethod_id_or_null();
if (id == nullptr) {
// If we find an uninitialized value, make sure there is
// enough space for all the uninitialized values we might
// find.
ik->ensure_space_for_methodids(index);
jmethodids_found = false;
id = m->jmethod_id();
}
} else {
id = m->jmethod_id();
}
jmethodID id = m->find_jmethod_id_or_null();
assert(id != nullptr, "should be created above");
result_list[result_index] = id;
}

View File

@ -3789,6 +3789,13 @@ void VM_RedefineClasses::AdjustAndCleanMetadata::do_klass(Klass* k) {
void VM_RedefineClasses::update_jmethod_ids() {
for (int j = 0; j < _matching_methods_length; ++j) {
Method* old_method = _matching_old_methods[j];
// The method_idnum should be within the range of 1..number-of-methods
// until incremented later for obsolete methods.
// The increment is so if a jmethodID is created for an old obsolete method
// it gets a new jmethodID cache slot in the InstanceKlass.
// They're cleaned out later when all methods of the previous version are purged.
assert(old_method->method_idnum() <= _old_methods->length(),
"shouldn't be incremented yet for obsolete methods");
jmethodID jmid = old_method->find_jmethod_id_or_null();
if (jmid != nullptr) {
// There is a jmethodID, change it to point to the new method

View File

@ -236,7 +236,7 @@ void mutex_init() {
MUTEX_DEFN(Service_lock , PaddedMonitor, service); // used for service thread operations
MUTEX_DEFN(Notification_lock , PaddedMonitor, service); // used for notification thread operations
MUTEX_DEFN(JmethodIdCreation_lock , PaddedMutex , nosafepoint-2); // used for creating jmethodIDs.
MUTEX_DEFN(JmethodIdCreation_lock , PaddedMutex , nosafepoint-1); // used for creating jmethodIDs can also lock HandshakeState_lock
MUTEX_DEFN(InvokeMethodTypeTable_lock , PaddedMutex , safepoint);
MUTEX_DEFN(InvokeMethodIntrinsicTable_lock , PaddedMonitor, safepoint);
MUTEX_DEFN(AdapterHandlerLibrary_lock , PaddedMutex , safepoint);

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "classfile/vmClasses.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/jmethodIDTable.hpp"
#include "oops/method.hpp"
#include "unittest.hpp"
#include "utilities/growableArray.hpp"
// Tests for creating and deleting jmethodIDs
TEST_VM(jmethodIDTable, test_jmethod_ids) {
  InstanceKlass* klass = vmClasses::ClassLoader_klass();
  Array<Method*>* methods = klass->methods();
  int length = methods->length();

  // Baseline: how many entries are already in the jmethodID table?
  uint64_t initial_entries = JmethodIDTable::get_entry_count();

  ResourceMark rm;
  // Remember each id so we can verify lookups are stable; size the array
  // for the number of methods we are about to record.
  GrowableArray<uint64_t> saved_ids(length);
  for (int i = 0; i < length; i++) {
    Method* m = methods->at(i);
    jmethodID mid = m->jmethod_id();
    saved_ids.push((uint64_t)mid);
  }
  uint64_t entries_now = JmethodIDTable::get_entry_count();
  // Creating one id per method should add exactly 'length' entries.
  ASSERT_EQ(entries_now, initial_entries + (uint64_t)length)
      << "each method should add exactly one table entry";

  // Requesting the jmethodID again must return the same value and must not
  // create new entries.
  for (int i = 0; i < length; i++) {
    Method* m = methods->at(i);
    jmethodID mid = m->jmethod_id();
    ASSERT_EQ(saved_ids.at(i), (uint64_t)mid) << "jmethodID should be stable across lookups";
  }
  entries_now = JmethodIDTable::get_entry_count();
  ASSERT_EQ(entries_now, initial_entries + (uint64_t)length)
      << "repeated lookups should not add entries";
}