diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index 904b0927014..a6697ba366a 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -70,36 +70,30 @@ void DependencyContext::init() { void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) { for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { nmethod* nm = b->get_nmethod(); - if (b->count() > 0) { - if (nm->is_marked_for_deoptimization()) { - deopt_scope->dependent(nm); - } else if (nm->check_dependency_on(changes)) { - LogTarget(Info, dependencies) lt; - if (lt.is_enabled()) { - ResourceMark rm; - LogStream ls(&lt); - ls.print_cr("Marked for deoptimization"); - changes.print_on(&ls); - nm->print_on(&ls); - nm->print_dependencies_on(&ls); - } - deopt_scope->mark(nm, !changes.is_call_site_change()); + if (nm->is_marked_for_deoptimization()) { + deopt_scope->dependent(nm); + } else if (nm->check_dependency_on(changes)) { + LogTarget(Info, dependencies) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(&lt); + ls.print_cr("Marked for deoptimization"); + changes.print_on(&ls); + nm->print_on(&ls); + nm->print_dependencies_on(&ls); } + deopt_scope->mark(nm, !changes.is_call_site_change()); } } } // // Add an nmethod to the dependency context. -// It's possible that an nmethod has multiple dependencies on a klass -// so a count is kept for each bucket to guarantee that creation and -// deletion of dependencies is consistent. 
// void DependencyContext::add_dependent_nmethod(nmethod* nm) { assert_lock_strong(CodeCache_lock); for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { if (nm == b->get_nmethod()) { - b->increment(); return; } } @@ -194,11 +188,9 @@ void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(Deopti set_dependencies(nullptr); while (b != nullptr) { nmethod* nm = b->get_nmethod(); - if (b->count() > 0) { - // Also count already (concurrently) marked nmethods to make sure - // deoptimization is triggered before execution in this thread continues. - deopt_scope->mark(nm); - } + // Also count already (concurrently) marked nmethods to make sure + // deoptimization is triggered before execution in this thread continues. + deopt_scope->mark(nm); b = release_and_get_next_not_unloading(b); } } @@ -208,7 +200,7 @@ void DependencyContext::print_dependent_nmethods(bool verbose) { int idx = 0; for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { nmethod* nm = b->get_nmethod(); - tty->print("[%d] count=%d { ", idx++, b->count()); + tty->print("[%d] { ", idx++); if (!verbose) { nm->print_on(tty, "nmethod"); tty->print_cr(" } "); @@ -224,20 +216,12 @@ void DependencyContext::print_dependent_nmethods(bool verbose) { bool DependencyContext::is_dependent_nmethod(nmethod* nm) { for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { if (nm == b->get_nmethod()) { -#ifdef ASSERT - int count = b->count(); - assert(count >= 0, "count shouldn't be negative: %d", count); -#endif return true; } } return false; } -int nmethodBucket::decrement() { - return Atomic::sub(&_count, 1); -} - // We use a monotonically increasing epoch counter to track the last epoch a given // dependency context was cleaned. GC threads claim cleanup tasks by performing // a CAS on this value. 
diff --git a/src/hotspot/share/code/dependencyContext.hpp b/src/hotspot/share/code/dependencyContext.hpp index 972a593f82e..630311c2e1e 100644 --- a/src/hotspot/share/code/dependencyContext.hpp +++ b/src/hotspot/share/code/dependencyContext.hpp @@ -39,27 +39,19 @@ class DepChange; // nmethodBucket is used to record dependent nmethods for // deoptimization. nmethod dependencies are actually <klass, method> // pairs but we really only care about the klass part for purposes of -// finding nmethods which might need to be deoptimized. Instead of -// recording the method, a count of how many times a particular nmethod -// was recorded is kept. This ensures that any recording errors are -// noticed since an nmethod should be removed as many times are it's -// added. +// finding nmethods which might need to be deoptimized. // class nmethodBucket: public CHeapObj<mtClass> { friend class VMStructs; private: nmethod* _nmethod; - volatile int _count; nmethodBucket* volatile _next; nmethodBucket* volatile _purge_list_next; public: nmethodBucket(nmethod* nmethod, nmethodBucket* next) : - _nmethod(nmethod), _count(1), _next(next), _purge_list_next(nullptr) {} + _nmethod(nmethod), _next(next), _purge_list_next(nullptr) {} - int count() { return _count; } - int increment() { _count += 1; return _count; } - int decrement(); nmethodBucket* next(); nmethodBucket* next_not_unloading(); void set_next(nmethodBucket* b);