diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index cf2f7e9c216..7a4a59a8553 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -391,7 +391,6 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
-  DEBUG_ONLY(GrowableArray<oop> lock_order{0};)
   // Start locking from outermost/oldest frame
   for (int i = (chunk->length() - 1); i >= 0; i--) {
     compiledVFrame* cvf = chunk->at(i);
@@ -401,13 +400,6 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
-#ifdef ASSERT
-      if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
-        for (MonitorInfo* mi : *monitors) {
-          lock_order.push(mi->owner());
-        }
-      }
-#endif // ASSERT
 #ifndef PRODUCT
       if (PrintDeoptimizationDetails) {
         ResourceMark rm;
@@ -439,11 +431,6 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
-#ifdef ASSERT
-  if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
-    deoptee_thread->lock_stack().verify_consistent_lock_order(lock_order, exec_mode != Deoptimization::Unpack_none);
-  }
-#endif // ASSERT
 }
 
 // Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
diff --git a/src/hotspot/share/runtime/lockStack.cpp b/src/hotspot/share/runtime/lockStack.cpp
index c7889da1a76..2f783c97c99 100644
--- a/src/hotspot/share/runtime/lockStack.cpp
+++ b/src/hotspot/share/runtime/lockStack.cpp
@@ -103,60 +103,6 @@ void LockStack::verify(const char* msg) const {
 }
 #endif
 
-#ifdef ASSERT
-void LockStack::verify_consistent_lock_order(GrowableArray<oop>& lock_order, bool leaf_frame) const {
-  int top_index = to_index(_top);
-  int lock_index = lock_order.length();
-
-  if (!leaf_frame) {
-    // If the lock_order is not from the leaf frame we must search
-    // for the top_index which fits with the most recent fast_locked
-    // objects in the lock stack.
-    while (lock_index-- > 0) {
-      const oop obj = lock_order.at(lock_index);
-      if (contains(obj)) {
-        for (int index = 0; index < top_index; index++) {
-          if (_base[index] == obj) {
-            // Found top index
-            top_index = index + 1;
-            break;
-          }
-        }
-
-        if (VM_Version::supports_recursive_lightweight_locking()) {
-          // With recursive looks there may be more of the same object
-          while (lock_index-- > 0 && lock_order.at(lock_index) == obj) {
-            top_index++;
-          }
-          assert(top_index <= to_index(_top), "too many obj in lock_order");
-        }
-
-        break;
-      }
-    }
-
-    lock_index = lock_order.length();
-  }
-
-  while (lock_index-- > 0) {
-    const oop obj = lock_order.at(lock_index);
-    const markWord mark = obj->mark_acquire();
-    assert(obj->is_locked(), "must be locked");
-    if (top_index > 0 && obj == _base[top_index - 1]) {
-      assert(mark.is_fast_locked() || mark.monitor()->is_owner_anonymous(),
-             "must be fast_locked or inflated by other thread");
-      top_index--;
-    } else {
-      assert(!mark.is_fast_locked(), "must be inflated");
-      assert(mark.monitor()->owner_raw() == get_thread() ||
-             (!leaf_frame && get_thread()->current_waiting_monitor() == mark.monitor()),
-             "must be owned by (or waited on by) thread");
-      assert(!contains(obj), "must not be on lock_stack");
-    }
-  }
-}
-#endif
-
 void LockStack::print_on(outputStream* st) {
   for (int i = to_index(_top); (--i) >= 0;) {
     st->print("LockStack[%d]: ", i);
diff --git a/src/hotspot/share/runtime/lockStack.hpp b/src/hotspot/share/runtime/lockStack.hpp
index 2cf02f00a8d..064f70a37fb 100644
--- a/src/hotspot/share/runtime/lockStack.hpp
+++ b/src/hotspot/share/runtime/lockStack.hpp
@@ -121,9 +121,6 @@ public:
 
   // Printing
   void print_on(outputStream* st);
-
-  // Verify Lock Stack consistent with lock order
-  void verify_consistent_lock_order(GrowableArray<oop>& lock_order, bool leaf_frame) const NOT_DEBUG_RETURN;
 };
 
 #endif // SHARE_RUNTIME_LOCKSTACK_HPP