8139595: MethodHandles::remove_dependent_nmethod is not MT safe

Reviewed-by: jrose, coleenp
This commit is contained in:
Vladimir Ivanov 2015-11-18 03:03:43 +03:00
parent f0c12f35cb
commit 1cfbe2dec5
12 changed files with 574 additions and 491 deletions

View File

@ -28,6 +28,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp"
#include "code/dependencyContext.hpp"
#include "code/pcDesc.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/oopFactory.hpp"
@ -3216,14 +3217,16 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
}
}
nmethodBucket* java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
return (nmethodBucket*) (address) call_site->long_field(_vmdependencies_offset);
}
void java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(oop call_site, nmethodBucket* context) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
call_site->long_field_put(_vmdependencies_offset, (jlong) (address) context);
intptr_t* vmdeps_addr = (intptr_t*)call_site->address_field_addr(_vmdependencies_offset);
#ifndef ASSERT
DependencyContext dep_ctx(vmdeps_addr);
#else
// Verify that call_site isn't moved during DependencyContext lifetime.
DependencyContext dep_ctx(vmdeps_addr, Handle(call_site));
#endif // ASSERT
return dep_ctx;
}
// Support for java_security_AccessControlContext

View File

@ -1212,6 +1212,8 @@ public:
#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \
macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, vmdependencies, intptr_signature, false)
class DependencyContext;
class java_lang_invoke_MethodHandleNatives_CallSiteContext : AllStatic {
friend class JavaClasses;
@ -1222,8 +1224,7 @@ private:
public:
// Accessors
static nmethodBucket* vmdependencies(oop context);
static void set_vmdependencies(oop context, nmethodBucket* bucket);
static DependencyContext vmdependencies(oop context);
// Testers
static bool is_subclass(Klass* klass) {

View File

@ -0,0 +1,347 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
// Perf counters tracking nmethodBucket allocation and reclamation.
// Created in DependencyContext::init() when UsePerfData is enabled,
// otherwise they remain NULL.
PerfCounter* DependencyContext::_perf_total_buckets_allocated_count   = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count       = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count   = NULL;
// Global initialization hook; forwards to DependencyContext::init()
// (presumably invoked from the VM's init sequence — confirm against init.cpp).
void dependencyContext_init() {
  DependencyContext::init();
}
// Create the perf counters used to track bucket allocation/reclamation.
// Only runs when UsePerfData is enabled; CHECK aborts counter creation
// if an exception is raised (EXCEPTION_MARK supplies the thread context).
void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}
//
// Scan every bucket in this context and mark for deoptimization each live,
// not-yet-marked nmethod whose recorded dependency is invalidated by the
// given change set. Returns the number of nmethods marked.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int marked_count = 0;
  nmethodBucket* bucket = dependencies();
  while (bucket != NULL) {
    nmethod* candidate = bucket->get_nmethod();
    // Buckets are reclaimed lazily (only once an nmethod becomes a zombie),
    // so skip dead nmethods and entries whose refcount already hit zero.
    bool eligible = bucket->count() > 0
                 && candidate->is_alive()
                 && !candidate->is_marked_for_deoptimization();
    if (eligible && candidate->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        candidate->print();
        candidate->print_dependencies();
      }
      candidate->mark_for_deoptimization();
      marked_count++;
    }
    bucket = bucket->next();
  }
  return marked_count;
}
//
// Record a dependency of the given nmethod on this context.
// An nmethod may depend on the same context multiple times, so each bucket
// carries a refcount; re-adding an existing nmethod just bumps its count.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm, bool expunge) {
  assert_lock_strong(CodeCache_lock);
  // Already recorded? Then only the refcount changes.
  for (nmethodBucket* cur = dependencies(); cur != NULL; cur = cur->next()) {
    if (cur->get_nmethod() == nm) {
      cur->increment();
      return;
    }
  }
  // Not found: prepend a fresh bucket (its count starts at 1).
  nmethodBucket* new_head = new nmethodBucket(nm, dependencies());
  set_dependencies(new_head);
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
  if (expunge) {
    // Opportunistically reclaim buckets whose refcount dropped to zero.
    expunge_stale_entries();
  }
}
//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm, bool expunge) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies();
  nmethodBucket* last = NULL;
  for (nmethodBucket* b = first; b != NULL; b = b->next()) {
    if (nm == b->get_nmethod()) {
      // decrement() is atomic (see nmethodBucket::decrement), so concurrent
      // removers cannot lose refcount updates.
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (expunge) {
          // Caller allows immediate reclamation: unlink this bucket and free it.
          if (last == NULL) {
            set_dependencies(b->next());
          } else {
            last->set_next(b->next());
          }
          delete b;
          if (UsePerfData) {
            _perf_total_buckets_deallocated_count->inc();
          }
        } else {
          // Mark the context as having stale entries, since it is not safe to
          // expunge the list right now.
          set_has_stale_entries(true);
          if (UsePerfData) {
            _perf_total_buckets_stale_count->inc();
            _perf_total_buckets_stale_acc_count->inc();
          }
        }
      }
      if (expunge) {
        // Remove stale entries from the list.
        expunge_stale_entries();
      }
      return;
    }
    last = b;
  }
#ifdef ASSERT
  tty->print_raw_cr("### can't find dependent nmethod");
  nm->print();
#endif // ASSERT
  // Falling out of the loop means the dependency was never recorded —
  // a bug in dependency bookkeeping.
  ShouldNotReachHere();
}
//
// Reclaim all unused buckets (those whose refcount dropped to zero while
// deferred removal was in effect).
//
void DependencyContext::expunge_stale_entries() {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!has_stale_entries()) {
    // Fast path: the flag says the list is clean; verify in debug builds.
    assert(!find_stale_entries(), "inconsistent info");
    return;
  }
  nmethodBucket* first = dependencies();
  nmethodBucket* last = NULL;
  int removed = 0;
  for (nmethodBucket* b = first; b != NULL;) {
    assert(b->count() >= 0, "bucket count: %d", b->count());
    // Capture the successor before (possibly) deleting b.
    nmethodBucket* next = b->next();
    if (b->count() == 0) {
      // Stale bucket: unlink it from the list and free it.
      if (last == NULL) {
        first = next;
      } else {
        last->set_next(next);
      }
      removed++;
      delete b;
      // last stays the same.
    } else {
      last = b;
    }
    b = next;
  }
  // Publish the (possibly new) list head and clear the stale flag.
  set_dependencies(first);
  set_has_stale_entries(false);
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
    _perf_total_buckets_stale_count->dec(removed);
  }
}
//
// Invalidate all dependencies in the context: detach the whole bucket list,
// mark every live dependent nmethod for deoptimization, and free all buckets.
// Returns the number of nmethods marked.
int DependencyContext::remove_all_dependents() {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = dependencies();
  // Detach the list first so the context is immediately empty.
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // Only live, not-yet-marked nmethods with a positive refcount are marked.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next();
    removed++;
    delete b;
    b = next;
  }
  set_has_stale_entries(false);
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}
#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
int idx = 0;
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
tty->print("[%d] count=%d { ", idx++, b->count());
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
} else {
nm->print();
nm->print_dependencies();
tty->print_cr("--- } ");
}
}
}
// Debug aid: report whether the given nmethod is recorded in this context.
bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* cur = dependencies();
  while (cur != NULL) {
    if (cur->get_nmethod() == nm) {
#ifdef ASSERT
      int cnt = cur->count();
      assert(cnt >= 0, "count shouldn't be negative: %d", cnt);
#endif
      return true;
    }
    cur = cur->next();
  }
  return false;
}
bool DependencyContext::find_stale_entries() {
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
if (b->count() == 0) return true;
}
return false;
}
#endif //PRODUCT
// Lock-free refcount decrement. remove_dependent_nmethod() may run
// concurrently in parallel GC threads, so the update must be atomic.
// Returns the post-decrement value.
int nmethodBucket::decrement() {
  return Atomic::add(-1, (volatile int *)&_count);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
// Unit-test harness for DependencyContext.
// Uses small fake, 8-byte-aligned nmethod pointer values as opaque keys —
// the add/remove paths exercised here only compare and store the pointers,
// never dereference them. The CodeCache_lock is held for the whole lifetime
// of the harness because the context operations assert lock ownership.
class TestDependencyContext {
 public:
  nmethod* _nmethods[3];           // fake nmethod identities 0x0, 0x8, 0x10
  intptr_t _dependency_context;    // packed context word under test

  TestDependencyContext() : _dependency_context(DependencyContext::EMPTY) {
    CodeCache_lock->lock_without_safepoint_check();

    DependencyContext depContext(&_dependency_context);

    _nmethods[0] = reinterpret_cast<nmethod*>(0x8 * 0);
    _nmethods[1] = reinterpret_cast<nmethod*>(0x8 * 1);
    _nmethods[2] = reinterpret_cast<nmethod*>(0x8 * 2);

    // Inserted in reverse so the list order matches _nmethods order.
    depContext.add_dependent_nmethod(_nmethods[2]);
    depContext.add_dependent_nmethod(_nmethods[1]);
    depContext.add_dependent_nmethod(_nmethods[0]);
  }

  ~TestDependencyContext() {
    wipe();
    CodeCache_lock->unlock();
  }

  // Remove one fake nmethod, either deleting its bucket immediately or
  // deferring (stale entry) and then expunging explicitly.
  static void testRemoveDependentNmethod(int id, bool delete_immediately) {
    TestDependencyContext c;
    DependencyContext depContext(&c._dependency_context);
    assert(!has_stale_entries(depContext), "check");

    nmethod* nm = c._nmethods[id];
    depContext.remove_dependent_nmethod(nm, delete_immediately);

    if (!delete_immediately) {
      // Deferred removal leaves a zero-count bucket behind until expunged.
      assert(has_stale_entries(depContext), "check");
      assert(depContext.is_dependent_nmethod(nm), "check");
      depContext.expunge_stale_entries();
    }

    assert(!has_stale_entries(depContext), "check");
    assert(!depContext.is_dependent_nmethod(nm), "check");
  }

  // Cover head/middle/tail removal in both deferred and immediate modes.
  static void testRemoveDependentNmethod() {
    testRemoveDependentNmethod(0, false);
    testRemoveDependentNmethod(1, false);
    testRemoveDependentNmethod(2, false);

    testRemoveDependentNmethod(0, true);
    testRemoveDependentNmethod(1, true);
    testRemoveDependentNmethod(2, true);
  }

  static void test() {
    testRemoveDependentNmethod();
  }

  // Cross-checks the cached stale flag against an actual list scan.
  static bool has_stale_entries(DependencyContext ctx) {
    assert(ctx.has_stale_entries() == ctx.find_stale_entries(), "check");
    return ctx.has_stale_entries();
  }

  // Frees remaining buckets directly; remove_all_dependents() is unusable
  // here because it would dereference the fake nmethod pointers.
  void wipe() {
    DependencyContext ctx(&_dependency_context);
    nmethodBucket* b = ctx.dependencies();
    ctx.set_dependencies(NULL);
    ctx.set_has_stale_entries(false);
    while (b != NULL) {
      nmethodBucket* next = b->next();
      delete b;
      b = next;
    }
  }
};
// Entry point invoked by the VM's internal test runner (non-PRODUCT only).
void TestDependencyContext_test() {
  TestDependencyContext::test();
}
#endif // PRODUCT

View File

@ -0,0 +1,152 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
#define SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
class nmethod;
class DepChange;
//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
noticed since an nmethod should be removed as many times as it's
// added.
//
// One node in a context's singly-linked dependency list.
// _count tracks how many times the nmethod was recorded against this
// context; increments happen under the CodeCache_lock, while decrement()
// is atomic because removal may run concurrently in parallel GC threads.
class nmethodBucket: public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  nmethod*       _nmethod;  // the dependent nmethod (not owned)
  int            _count;    // refcount; 0 marks the bucket as stale
  nmethodBucket* _next;     // next node, or NULL at the tail

 public:
  // NOTE: initializer order matches declaration order (_nmethod, _count,
  // _next); members are initialized in declaration order regardless, so a
  // mismatched list would be misleading and trigger -Wreorder.
  nmethodBucket(nmethod* nmethod, nmethodBucket* next) :
    _nmethod(nmethod), _count(1), _next(next) {}

  int count()                      { return _count; }
  int increment()                  { _count += 1; return _count; }
  int decrement();                 // atomic; defined in dependencyContext.cpp
  nmethodBucket* next()            { return _next; }
  void set_next(nmethodBucket* b)  { _next = b; }
  nmethod* get_nmethod()           { return _nmethod; }
};
//
// Utility class to manipulate nmethod dependency context.
// The context consists of nmethodBucket* (a head of a linked list)
and a boolean flag (does the list contain stale entries). The structure is
// encoded as an intptr_t: lower bit is used for the flag. It is possible since
// nmethodBucket* is aligned - the structure is malloc'ed in C heap.
// Dependency context can be attached either to an InstanceKlass (_dep_context field)
// or CallSiteContext oop for call_site_target dependencies (see javaClasses.hpp).
// DependencyContext class operates on some location which holds a intptr_t value.
//
class DependencyContext : public StackObj {
  friend class VMStructs;
  friend class TestDependencyContext;
 private:
  // The low bit of the packed word is the "has stale entries" flag; the
  // remaining bits hold the nmethodBucket* list head. The tag bit is free
  // because buckets are malloc'ed in the C heap and therefore aligned.
  enum TagBits { _has_stale_entries_bit = 1, _has_stale_entries_mask = 1 };

  intptr_t* _dependency_context_addr;  // location holding the packed word

  // Install a new list head while preserving the current stale flag.
  void set_dependencies(nmethodBucket* b) {
    assert((intptr_t(b) & _has_stale_entries_mask) == 0, "should be aligned");
    if (has_stale_entries()) {
      *_dependency_context_addr = intptr_t(b) | _has_stale_entries_mask;
    } else {
      *_dependency_context_addr = intptr_t(b);
    }
  }

  // Set or clear the stale flag without disturbing the list head bits.
  void set_has_stale_entries(bool x) {
    if (x) {
      *_dependency_context_addr |= _has_stale_entries_mask;
    } else {
      *_dependency_context_addr &= ~_has_stale_entries_mask;
    }
  }

  // Current list head with the tag bit masked off.
  nmethodBucket* dependencies() {
    intptr_t value = *_dependency_context_addr;
    return (nmethodBucket*) (value & ~_has_stale_entries_mask);
  }

  // True if the list is known to contain zero-count (stale) buckets.
  bool has_stale_entries() const {
    intptr_t value = *_dependency_context_addr;
    return (value & _has_stale_entries_mask) != 0;
  }

  static PerfCounter* _perf_total_buckets_allocated_count;
  static PerfCounter* _perf_total_buckets_deallocated_count;
  static PerfCounter* _perf_total_buckets_stale_count;
  static PerfCounter* _perf_total_buckets_stale_acc_count;

 public:
#ifdef ASSERT
  // Verification for dependency contexts rooted at Java objects.
  Handle _base; // non-NULL if dependency context resides in an oop (e.g. CallSite).
  oop _base_oop;

  DependencyContext(intptr_t* addr, Handle base = Handle())
    : _dependency_context_addr(addr), _base(base)
  {
    // Snapshot the base oop so the destructor can detect relocation.
    _base_oop = _base();
  }

  ~DependencyContext() {
    // Base oop relocation invalidates _dependency_context_addr.
    assert(_base_oop == _base(), "base oop relocation is forbidden");
  }
#else
  DependencyContext(intptr_t* addr) : _dependency_context_addr(addr) {}
#endif // ASSERT

  static const intptr_t EMPTY = 0; // dependencies = NULL, has_stale_entries = false

  static void init();

  int mark_dependent_nmethods(DepChange& changes);
  void add_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
  void remove_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
  int remove_all_dependents();

  void expunge_stale_entries();

#ifndef PRODUCT
  void print_dependent_nmethods(bool verbose);
  bool is_dependent_nmethod(nmethod* nm);
  bool find_stale_entries();
#endif //PRODUCT
};
#endif // SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP

View File

@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@ -547,7 +548,6 @@ void CompileBroker::compilation_init() {
PerfData::U_Ticks, CHECK);
}
if (UsePerfData) {
EXCEPTION_MARK;

View File

@ -27,6 +27,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/specialized_oop_closures.hpp"
@ -203,7 +204,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
int iksize = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
access_flags.is_interface(), is_anonymous);
set_vtable_length(vtable_len);
set_itable_length(itable_len);
set_static_field_size(static_field_size);
@ -232,7 +232,7 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_static_oop_field_count(0);
set_nonstatic_field_size(0);
set_is_marked_dependent(false);
set_has_unloaded_dependent(false);
_dep_context = DependencyContext::EMPTY;
set_init_state(InstanceKlass::allocated);
set_init_thread(NULL);
set_reference_type(rt);
@ -246,7 +246,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_annotations(NULL);
set_jvmti_cached_class_field_map(NULL);
set_initial_method_idnum(0);
_dependencies = NULL;
set_jvmti_cached_class_field_map(NULL);
set_cached_class_file(NULL);
set_initial_method_idnum(0);
@ -1854,200 +1853,30 @@ jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
return id;
}
int nmethodBucket::decrement() {
return Atomic::add(-1, (volatile int *)&_count);
inline DependencyContext InstanceKlass::dependencies() {
DependencyContext dep_context(&_dep_context);
return dep_context;
}
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock);
int found = 0;
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
// since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive.
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
if (TraceDependencies) {
ResourceMark rm;
tty->print_cr("Marked for deoptimization");
changes.print();
nm->print();
nm->print_dependencies();
}
nm->mark_for_deoptimization();
found++;
}
}
return found;
}
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent. Returns new head of the list.
//
nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
b->increment();
return deps;
}
}
return new nmethodBucket(nm, deps);
}
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies. Returns true if the bucket was deleted,
// or marked ready for reclaimation.
bool nmethodBucket::remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately) {
assert_locked_or_safepoint(CodeCache_lock);
nmethodBucket* first = *deps;
nmethodBucket* last = NULL;
for (nmethodBucket* b = first; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
int val = b->decrement();
guarantee(val >= 0, "Underflow: %d", val);
if (val == 0) {
if (delete_immediately) {
if (last == NULL) {
*deps = b->next();
} else {
last->set_next(b->next());
}
delete b;
}
}
return true;
}
last = b;
}
#ifdef ASSERT
tty->print_raw_cr("### can't find dependent nmethod");
nm->print();
#endif // ASSERT
ShouldNotReachHere();
return false;
}
// Convenience overload, for callers that don't want to delete the nmethodBucket entry.
bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
nmethodBucket** deps_addr = &deps;
return remove_dependent_nmethod(deps_addr, nm, false /* Don't delete */);
}
//
// Reclaim all unused buckets. Returns new head of the list.
//
nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
nmethodBucket* first = deps;
nmethodBucket* last = NULL;
nmethodBucket* b = first;
while (b != NULL) {
assert(b->count() >= 0, "bucket count: %d", b->count());
nmethodBucket* next = b->next();
if (b->count() == 0) {
if (last == NULL) {
first = next;
} else {
last->set_next(next);
}
delete b;
// last stays the same.
} else {
last = b;
}
b = next;
}
return first;
}
#ifndef PRODUCT
void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
int idx = 0;
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
tty->print("[%d] count=%d { ", idx++, b->count());
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
} else {
nm->print();
nm->print_dependencies();
tty->print_cr("--- } ");
}
}
}
bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
#ifdef ASSERT
int count = b->count();
assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
return true;
}
}
return false;
}
#endif //PRODUCT
int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock);
return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
}
void InstanceKlass::clean_dependent_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
if (has_unloaded_dependent()) {
_dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
set_has_unloaded_dependent(false);
}
#ifdef ASSERT
else {
// Verification
for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
assert(b->count() >= 0, "bucket count: %d", b->count());
assert(b->count() != 0, "empty buckets need to be cleaned");
}
}
#endif
return dependencies().mark_dependent_nmethods(changes);
}
void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
_dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
dependencies().add_dependent_nmethod(nm);
}
void InstanceKlass::remove_dependent_nmethod(nmethod* nm, bool delete_immediately) {
assert_locked_or_safepoint(CodeCache_lock);
if (nmethodBucket::remove_dependent_nmethod(&_dependencies, nm, delete_immediately)) {
set_has_unloaded_dependent(true);
}
dependencies().remove_dependent_nmethod(nm, delete_immediately);
}
#ifndef PRODUCT
void InstanceKlass::print_dependent_nmethods(bool verbose) {
nmethodBucket::print_dependent_nmethods(_dependencies, verbose);
dependencies().print_dependent_nmethods(verbose);
}
bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
return nmethodBucket::is_dependent_nmethod(_dependencies, nm);
return dependencies().is_dependent_nmethod(nm);
}
#endif //PRODUCT
@ -2055,7 +1884,9 @@ void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive)
clean_implementors_list(is_alive);
clean_method_data(is_alive);
clean_dependent_nmethods();
// Since GC iterates InstanceKlasses sequentially, it is safe to remove stale entries here.
DependencyContext dep_context(&_dep_context);
dep_context.expunge_stale_entries();
}
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
@ -2102,6 +1933,8 @@ void InstanceKlass::remove_unshareable_info() {
constants()->remove_unshareable_info();
assert(_dep_context == DependencyContext::EMPTY, "dependency context is not shareable");
for (int i = 0; i < methods()->length(); i++) {
Method* m = methods()->at(i);
m->remove_unshareable_info();
@ -2231,12 +2064,10 @@ void InstanceKlass::release_C_heap_structures() {
}
// release dependencies
nmethodBucket* b = _dependencies;
_dependencies = NULL;
while (b != NULL) {
nmethodBucket* next = b->next();
delete b;
b = next;
{
DependencyContext ctx(&_dep_context);
int marked = ctx.remove_all_dependents();
assert(marked == 0, "all dependencies should be already invalidated");
}
// Deallocate breakpoint records
@ -3558,199 +3389,3 @@ jint InstanceKlass::get_cached_class_file_len() {
unsigned char * InstanceKlass::get_cached_class_file_bytes() {
return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestNmethodBucketContext {
public:
nmethod* _nmethodLast;
nmethod* _nmethodMiddle;
nmethod* _nmethodFirst;
nmethodBucket* _bucketLast;
nmethodBucket* _bucketMiddle;
nmethodBucket* _bucketFirst;
nmethodBucket* _bucketList;
TestNmethodBucketContext() {
CodeCache_lock->lock_without_safepoint_check();
_nmethodLast = reinterpret_cast<nmethod*>(0x8 * 0);
_nmethodMiddle = reinterpret_cast<nmethod*>(0x8 * 1);
_nmethodFirst = reinterpret_cast<nmethod*>(0x8 * 2);
_bucketLast = new nmethodBucket(_nmethodLast, NULL);
_bucketMiddle = new nmethodBucket(_nmethodMiddle, _bucketLast);
_bucketFirst = new nmethodBucket(_nmethodFirst, _bucketMiddle);
_bucketList = _bucketFirst;
}
~TestNmethodBucketContext() {
delete _bucketLast;
delete _bucketMiddle;
delete _bucketFirst;
CodeCache_lock->unlock();
}
};
class TestNmethodBucket {
public:
static void testRemoveDependentNmethodFirstDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, true /* delete */);
assert(c._bucketList == c._bucketMiddle, "check");
assert(c._bucketList->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketFirst = NULL;
}
static void testRemoveDependentNmethodMiddleDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, true /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketMiddle = NULL;
}
static void testRemoveDependentNmethodLastDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, true /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketLast = NULL;
}
static void testRemoveDependentNmethodFirstDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 0, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodMiddleDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 0, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodLastDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 0, "check");
}
static void testRemoveDependentNmethodConvenienceFirst() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodFirst);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 0, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodConvenienceMiddle() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodMiddle);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 0, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodConvenienceLast() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodLast);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 0, "check");
}
static void testRemoveDependentNmethod() {
testRemoveDependentNmethodFirstDeleteImmediately();
testRemoveDependentNmethodMiddleDeleteImmediately();
testRemoveDependentNmethodLastDeleteImmediately();
testRemoveDependentNmethodFirstDeleteDeferred();
testRemoveDependentNmethodMiddleDeleteDeferred();
testRemoveDependentNmethodLastDeleteDeferred();
testRemoveDependentNmethodConvenienceFirst();
testRemoveDependentNmethodConvenienceMiddle();
testRemoveDependentNmethodConvenienceLast();
}
static void test() {
testRemoveDependentNmethod();
}
};
void TestNmethodBucket_test() {
TestNmethodBucket::test();
}
#endif

View File

@ -53,15 +53,15 @@
// forward declaration for class -- see below for definition
class SuperTypeClosure;
class JNIid;
class jniIdMapBase;
class BreakpointInfo;
class fieldDescriptor;
class DepChange;
class nmethodBucket;
class DependencyContext;
class fieldDescriptor;
class jniIdMapBase;
class JNIid;
class JvmtiCachedClassFieldMap;
class MemberNameTable;
class SuperTypeClosure;
// This is used in iterators below.
class FieldClosure: public StackObj {
@ -198,7 +198,6 @@ class InstanceKlass: public Klass {
// _is_marked_dependent can be set concurrently, thus cannot be part of the
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization
bool _has_unloaded_dependent;
// The low two bits of _misc_flags contains the kind field.
// This can be used to quickly discriminate among the four kinds of
@ -235,7 +234,7 @@ class InstanceKlass: public Klass {
MemberNameTable* _member_names; // Member names
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
nmethodBucket* _dependencies; // list of dependent nmethods
intptr_t _dep_context; // packed DependencyContext structure
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
BreakpointInfo* _breakpoints; // bpt lists, managed by Method*
// Linked instanceKlasses of previous versions
@ -468,9 +467,6 @@ class InstanceKlass: public Klass {
bool is_marked_dependent() const { return _is_marked_dependent; }
void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
bool has_unloaded_dependent() const { return _has_unloaded_dependent; }
void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
// initialization (virtuals from Klass)
bool should_be_initialized() const; // means that initialize should be called
void initialize(TRAPS);
@ -835,7 +831,8 @@ public:
JNIid* jni_id_for(int offset);
// maintenance of deoptimization dependencies
int mark_dependent_nmethods(DepChange& changes);
inline DependencyContext dependencies();
int mark_dependent_nmethods(DepChange& changes);
void add_dependent_nmethod(nmethod* nm);
void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
@ -1027,7 +1024,6 @@ public:
void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
void clean_dependent_nmethods();
// Explicit metaspace deallocation of fields
// For RedefineClasses and class file parsing errors, we need to deallocate
@ -1320,48 +1316,6 @@ class JNIid: public CHeapObj<mtClass> {
void verify(Klass* holder);
};
//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
// noticed since an nmethod should be removed as many times are it's
// added.
//
class nmethodBucket: public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  nmethod*       _nmethod;   // the dependent nmethod
  int            _count;     // times this nmethod was recorded for the holder
  nmethodBucket* _next;      // next bucket in the singly-linked list

 public:
  // A bucket is created on the first recording of an nmethod, hence the
  // initial count of 1. Parameter renamed from 'nmethod' to 'nm' so it no
  // longer shadows the type name; members are set via the initializer list
  // (in declaration order) instead of assignment in the body.
  nmethodBucket(nmethod* nm, nmethodBucket* next)
    : _nmethod(nm), _count(1), _next(next) { }

  // Accessors; the read-only ones are const so they can be used on
  // const bucket pointers.
  int count() const               { return _count; }
  int increment()                 { _count += 1; return _count; }
  int decrement();                // defined out of line
  nmethodBucket* next() const     { return _next; }
  void set_next(nmethodBucket* b) { _next = b; }
  nmethod* get_nmethod() const    { return _nmethod; }

  // Static list-manipulation helpers operating on a whole dependency list.
  static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
  static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
  static bool remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately);
  static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
  static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
#ifndef PRODUCT
  static void print_dependent_nmethods(nmethodBucket* deps, bool verbose);
  static bool is_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
#endif //PRODUCT
};
// An iterator that's used to access the inner classes indices in the
// InstanceKlass::_inner_classes array.
class InnerClassesIterator : public StackObj {

View File

@ -3878,7 +3878,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
unit_test_function_call
// Forward declaration
void TestNmethodBucket_test();
void TestDependencyContext_test();
void test_semaphore();
void TestOS_test();
void TestReservedSpace_test();
@ -3910,7 +3910,7 @@ void WorkerDataArray_test();
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
run_unit_test(TestNmethodBucket_test());
run_unit_test(TestDependencyContext_test());
run_unit_test(test_semaphore());
run_unit_test(TestOS_test());
run_unit_test(TestReservedSpace_test());

View File

@ -27,6 +27,7 @@
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "code/codeCacheExtensions.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
@ -945,30 +946,33 @@ int MethodHandles::find_MemberNames(KlassHandle k,
return rfill + overflow;
}
// Is it safe to remove stale entries from a dependency list?
// Expunging actually unlinks and reclaims entries (as opposed to merely
// marking them stale), so it must not race with concurrent readers.
static bool safe_to_expunge() {
// Since parallel GC threads can concurrently iterate over a dependency
// list during safepoint, it is safe to remove entries only when
// CodeCache lock is held.
return CodeCache_lock->owned_by_self();
}
void MethodHandles::add_dependent_nmethod(oop call_site, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
oop context = java_lang_invoke_CallSite::context(call_site);
nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
nmethodBucket* new_deps = nmethodBucket::add_dependent_nmethod(deps, nm);
if (deps != new_deps) {
java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
}
DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
// Try to purge stale entries on updates.
// Since GC doesn't clean dependency contexts rooted at CallSiteContext objects,
// in order to avoid memory leak, stale entries are purged whenever a dependency list
// is changed (both on addition and removal). Though memory reclamation is delayed,
// it avoids indefinite memory usage growth.
deps.add_dependent_nmethod(nm, /*expunge_stale_entries=*/safe_to_expunge());
}
void MethodHandles::remove_dependent_nmethod(oop call_site, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
oop context = java_lang_invoke_CallSite::context(call_site);
nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
if (nmethodBucket::remove_dependent_nmethod(deps, nm)) {
nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
if (deps != new_deps) {
java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
}
}
DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
deps.remove_dependent_nmethod(nm, /*expunge_stale_entries=*/safe_to_expunge());
}
void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
@ -977,21 +981,15 @@ void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
int marked = 0;
CallSiteDepChange changes(call_site(), target());
{
No_Safepoint_Verifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
oop context = java_lang_invoke_CallSite::context(call_site());
nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
marked = nmethodBucket::mark_dependent_nmethods(deps, changes);
if (marked > 0) {
nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
if (deps != new_deps) {
java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
}
}
DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
marked = deps.mark_dependent_nmethods(changes);
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization
// At least one nmethod has been marked for deoptimization.
VM_Deoptimize op;
VMThread::execute(&op);
}
@ -1331,6 +1329,8 @@ JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobjec
}
JVM_END
// It is called by a Cleaner object which ensures that dropped CallSites properly
// deallocate their dependency information.
JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
Handle context(THREAD, JNIHandles::resolve_non_null(context_jh));
{
@ -1339,19 +1339,11 @@ JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject con
int marked = 0;
{
No_Safepoint_Verifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethodBucket* b = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
while(b != NULL) {
nmethod* nm = b->get_nmethod();
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
nm->mark_for_deoptimization();
marked++;
}
nmethodBucket* next = b->next();
delete b;
b = next;
}
java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context(), NULL); // reset context
assert(safe_to_expunge(), "removal is not safe");
DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
marked = deps.remove_all_dependents();
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization

View File

@ -72,6 +72,7 @@ void vtableStubs_init();
void InlineCacheBuffer_init();
void compilerOracle_init();
bool compileBroker_init();
void dependencyContext_init();
// Initialization after compiler initialization
bool universe_post_init(); // must happen after compiler_init
@ -131,6 +132,8 @@ jint init_globals() {
vtableStubs_init();
InlineCacheBuffer_init();
compilerOracle_init();
dependencyContext_init();
if (!compileBroker_init()) {
return JNI_EINVAL;
}

View File

@ -424,6 +424,7 @@ class PerfLongVariant : public PerfLong {
public:
inline void inc() { (*(jlong*)_valuep)++; }
inline void inc(jlong val) { (*(jlong*)_valuep) += val; }
inline void dec(jlong val) { inc(-val); }
inline void add(jlong val) { (*(jlong*)_valuep) += val; }
void clear_sample_helper() { _sample_helper = NULL; }
};

View File

@ -343,10 +343,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \
volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
nonstatic_field(InstanceKlass, _annotations, Annotations*) \
nonstatic_field(InstanceKlass, _dependencies, nmethodBucket*) \
nonstatic_field(nmethodBucket, _nmethod, nmethod*) \
nonstatic_field(nmethodBucket, _count, int) \
nonstatic_field(nmethodBucket, _next, nmethodBucket*) \
nonstatic_field(InstanceKlass, _method_ordering, Array<int>*) \
nonstatic_field(InstanceKlass, _default_vtable_indices, Array<int>*) \
nonstatic_field(Klass, _super_check_offset, juint) \
@ -1555,7 +1551,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
declare_toplevel_type(volatile Metadata*) \
\
declare_toplevel_type(DataLayout) \
declare_toplevel_type(nmethodBucket) \
\
/********/ \
/* Oops */ \