8210832: Remove sneaky locking in class Monitor

Removed sneaky locking and simplified vm monitors implementation

Co-authored-by: David Holmes <david.holmes@oracle.com>
Reviewed-by: rehn, dcubed, pliden, dholmes, coleenp
This commit is contained in:
Patricio Chilano Mateo 2019-02-05 15:12:13 -05:00
parent cd9b1aabb0
commit c94cdddbdd
17 changed files with 509 additions and 1157 deletions

View File

@ -2215,5 +2215,74 @@ void Parker::unpark() {
}
}
// Platform Monitor implementation
// Create the native mutex and condition variable backing this monitor.
os::PlatformMonitor::PlatformMonitor() {
  int rc = pthread_cond_init(&_cond, _condAttr);
  assert_status(rc == 0, rc, "cond_init");
  rc = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(rc == 0, rc, "mutex_init");
}
// Destroy the native primitives; both destroys are expected to succeed.
os::PlatformMonitor::~PlatformMonitor() {
  int rc = pthread_cond_destroy(&_cond);
  assert_status(rc == 0, rc, "cond_destroy");
  rc = pthread_mutex_destroy(&_mutex);
  assert_status(rc == 0, rc, "mutex_destroy");
}
// Acquire the underlying native mutex (blocking).
void os::PlatformMonitor::lock() {
  int rc = pthread_mutex_lock(&_mutex);
  assert_status(rc == 0, rc, "mutex_lock");
}
// Release the underlying native mutex; must be held by the caller.
void os::PlatformMonitor::unlock() {
  int rc = pthread_mutex_unlock(&_mutex);
  assert_status(rc == 0, rc, "mutex_unlock");
}
// Non-blocking acquire: true on success, false if the mutex is busy.
bool os::PlatformMonitor::try_lock() {
  int rc = pthread_mutex_trylock(&_mutex);
  assert_status(rc == 0 || rc == EBUSY, rc, "mutex_trylock");
  return rc == 0;
}
// Must already be locked
// Wait for a notification or for the given timeout (millis == 0 means wait
// without a timeout). Returns OS_OK on wakeup, OS_TIMEOUT on timeout.
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  if (millis <= 0) {
    // Untimed wait: block until notified.
    int rc = pthread_cond_wait(&_cond, &_mutex);
    assert_status(rc == 0, rc, "cond_wait");
    return OS_OK;
  }
  // We have to watch for overflow when converting millis to nanos,
  // but if millis is that large then we will end up limiting to
  // MAX_SECS anyway, so just do that here.
  if (millis / MILLIUNITS > MAX_SECS) {
    millis = jlong(MAX_SECS) * MILLIUNITS;
  }
  struct timespec abst;
  to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false, false);
  int rc = pthread_cond_timedwait(&_cond, &_mutex, &abst);
  assert_status(rc == 0 || rc == ETIMEDOUT, rc, "cond_timedwait");
  return (rc == 0) ? OS_OK : OS_TIMEOUT;
}
// Wake one thread waiting on this monitor's condition variable.
void os::PlatformMonitor::notify() {
  int rc = pthread_cond_signal(&_cond);
  assert_status(rc == 0, rc, "cond_signal");
}
// Wake all threads waiting on this monitor's condition variable.
void os::PlatformMonitor::notify_all() {
  int rc = pthread_cond_broadcast(&_cond);
  assert_status(rc == 0, rc, "cond_broadcast");
}
#endif // !SOLARIS

View File

@ -224,6 +224,23 @@ class PlatformParker : public CHeapObj<mtInternal> {
PlatformParker();
};
// Platform specific implementation that underpins VM Monitor/Mutex class
// Thin wrapper around a pthread mutex + condition variable pair.
// Implemented in os_posix.cpp; all operations assert on unexpected errno.
class PlatformMonitor : public CHeapObj<mtInternal> {
private:
pthread_mutex_t _mutex; // Native mutex for locking
pthread_cond_t _cond; // Native condition variable for blocking
public:
PlatformMonitor();
~PlatformMonitor();
void lock();           // blocking acquire
void unlock();         // release; caller must hold the lock
bool try_lock();       // non-blocking acquire; true on success
// Wait while locked; millis == 0 means no timeout. Returns OS_OK or OS_TIMEOUT.
int wait(jlong millis);
void notify();         // wake one waiter
void notify_all();     // wake all waiters
};
#endif // !SOLARIS
#endif // OS_POSIX_OS_POSIX_HPP

View File

@ -5192,6 +5192,72 @@ void Parker::unpark() {
}
}
// Platform Monitor implementation
// Create the native condition variable and mutex via the Solaris wrappers.
os::PlatformMonitor::PlatformMonitor() {
  int rc = os::Solaris::cond_init(&_cond);
  assert_status(rc == 0, rc, "cond_init");
  rc = os::Solaris::mutex_init(&_mutex);
  assert_status(rc == 0, rc, "mutex_init");
}
// Destroy the native primitives; both destroys are expected to succeed.
os::PlatformMonitor::~PlatformMonitor() {
  int rc = os::Solaris::cond_destroy(&_cond);
  assert_status(rc == 0, rc, "cond_destroy");
  rc = os::Solaris::mutex_destroy(&_mutex);
  assert_status(rc == 0, rc, "mutex_destroy");
}
// Acquire the underlying native mutex (blocking).
void os::PlatformMonitor::lock() {
  int rc = os::Solaris::mutex_lock(&_mutex);
  assert_status(rc == 0, rc, "mutex_lock");
}
// Release the underlying native mutex; must be held by the caller.
void os::PlatformMonitor::unlock() {
  int rc = os::Solaris::mutex_unlock(&_mutex);
  assert_status(rc == 0, rc, "mutex_unlock");
}
// Non-blocking acquire: true on success, false if the mutex is busy.
bool os::PlatformMonitor::try_lock() {
  int rc = os::Solaris::mutex_trylock(&_mutex);
  assert_status(rc == 0 || rc == EBUSY, rc, "mutex_trylock");
  return rc == 0;
}
// Must already be locked
// Wait for a notification or for the given timeout (millis == 0 means wait
// without a timeout). Returns OS_OK on wakeup, OS_TIMEOUT on timeout.
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  if (millis <= 0) {
    int rc = os::Solaris::cond_wait(&_cond, &_mutex);
    assert_status(rc == 0 || rc == EINTR,
                  rc, "cond_wait");
    return OS_OK;
  }
  timestruc_t abst;
  compute_abstime(&abst, millis);
  int rc = os::Solaris::cond_timedwait(&_cond, &_mutex, &abst);
  assert_status(rc == 0 || rc == EINTR ||
                rc == ETIME || rc == ETIMEDOUT,
                rc, "cond_timedwait");
  // EINTR acts as spurious wakeup - which is permitted anyway
  return (rc == 0 || rc == EINTR) ? OS_OK : OS_TIMEOUT;
}
// Wake one thread waiting on this monitor's condition variable.
void os::PlatformMonitor::notify() {
  int rc = os::Solaris::cond_signal(&_cond);
  assert_status(rc == 0, rc, "cond_signal");
}
// Wake all threads waiting on this monitor's condition variable.
void os::PlatformMonitor::notify_all() {
  int rc = os::Solaris::cond_broadcast(&_cond);
  assert_status(rc == 0, rc, "cond_broadcast");
}
extern char** environ;
// Run the specified command in a separate process. Return its exit value,

View File

@ -335,4 +335,21 @@ class PlatformParker : public CHeapObj<mtInternal> {
}
};
// Platform specific implementation that underpins VM Monitor/Mutex class
// Thin wrapper around a Solaris mutex_t + cond_t pair (via os::Solaris
// wrappers). Implemented in os_solaris.cpp; operations assert on errors.
class PlatformMonitor : public CHeapObj<mtInternal> {
private:
mutex_t _mutex; // Native mutex for locking
cond_t _cond; // Native condition variable for blocking
public:
PlatformMonitor();
~PlatformMonitor();
void lock();           // blocking acquire
void unlock();         // release; caller must hold the lock
bool try_lock();       // non-blocking acquire; true on success
// Wait while locked; millis == 0 means no timeout. Returns OS_OK or OS_TIMEOUT.
int wait(jlong millis);
void notify();         // wake one waiter
void notify_all();     // wake all waiters
};
#endif // OS_SOLARIS_OS_SOLARIS_HPP

View File

@ -5277,6 +5277,55 @@ void Parker::unpark() {
SetEvent(_ParkEvent);
}
// Platform Monitor implementation
// Set up the Win32 critical section and condition variable. Neither call
// can fail, so no status checking is required.
os::PlatformMonitor::PlatformMonitor() {
  InitializeCriticalSection(&_mutex);
  InitializeConditionVariable(&_cond);
}
os::PlatformMonitor::~PlatformMonitor() {
  // CONDITION_VARIABLE objects have no Win32 destruction function, so only
  // the critical section needs to be torn down explicitly.
  DeleteCriticalSection(&_mutex);
}
// Acquire the underlying critical section (blocking).
void os::PlatformMonitor::lock() {
  EnterCriticalSection(&_mutex);
}
// Release the underlying critical section; must be held by the caller.
void os::PlatformMonitor::unlock() {
  LeaveCriticalSection(&_mutex);
}
// Non-blocking acquire: true on success, false if the lock is held elsewhere.
bool os::PlatformMonitor::try_lock() {
  // TryEnterCriticalSection returns a BOOL (int); compare explicitly so the
  // conversion to bool is intentional and avoids MSVC's C4800 warning.
  return TryEnterCriticalSection(&_mutex) != 0;
}
// Must already be locked
// Wait for a notification or for the given timeout (millis == 0 means wait
// without a timeout). Returns OS_OK on wakeup, OS_TIMEOUT on timeout.
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  // SleepConditionVariableCS takes a DWORD timeout: a jlong value beyond the
  // DWORD range would be silently truncated, and one whose low 32 bits equal
  // 0xFFFFFFFF would alias INFINITE and wait forever. Clamp large values
  // instead - max_jint milliseconds (~24 days) is effectively unbounded here.
  DWORD timeout;
  if (millis == 0) {
    timeout = INFINITE; // zero means wait without a timeout
  } else if (millis > (jlong)max_jint) {
    timeout = (DWORD)max_jint;
  } else {
    timeout = (DWORD)millis;
  }
  int ret = OS_TIMEOUT;
  int status = SleepConditionVariableCS(&_cond, &_mutex, timeout);
  if (status != 0) {
    ret = OS_OK;
  }
#ifndef PRODUCT
  else {
    // Any failure other than a timeout indicates a usage or system error.
    DWORD err = GetLastError();
    assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
  }
#endif
  return ret;
}
// Wake one thread waiting on this monitor's condition variable.
void os::PlatformMonitor::notify() {
  WakeConditionVariable(&_cond);
}
// Wake all threads waiting on this monitor's condition variable.
void os::PlatformMonitor::notify_all() {
  WakeAllConditionVariable(&_cond);
}
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {

View File

@ -187,4 +187,21 @@ class PlatformParker : public CHeapObj<mtInternal> {
} ;
// Platform specific implementation that underpins VM Monitor/Mutex class
// Thin wrapper around a Win32 CRITICAL_SECTION + CONDITION_VARIABLE pair.
// Implemented in os_windows.cpp.
class PlatformMonitor : public CHeapObj<mtInternal> {
private:
CRITICAL_SECTION _mutex; // Native mutex for locking
CONDITION_VARIABLE _cond; // Native condition variable for blocking
public:
PlatformMonitor();
~PlatformMonitor();
void lock();           // blocking acquire
void unlock();         // release; caller must hold the lock
bool try_lock();       // non-blocking acquire; true on success
// Wait while locked; millis == 0 means no timeout. Returns OS_OK or OS_TIMEOUT.
int wait(jlong millis);
void notify();         // wake one waiter
void notify_all();     // wake all waiters
};
#endif // OS_WINDOWS_OS_WINDOWS_HPP

View File

@ -169,6 +169,7 @@
LOG_TAG(mirror) \
LOG_TAG(verification) \
LOG_TAG(verify) \
LOG_TAG(vmmonitor) \
LOG_TAG(vmoperation) \
LOG_TAG(vmthread) \
LOG_TAG(vtables) \

View File

@ -286,6 +286,69 @@ class ThreadBlockInVM : public ThreadStateTransition {
}
};
// Unlike ThreadBlockInVM, this class is designed to avoid certain deadlock scenarios while making
// transitions inside class Monitor in cases where we need to block for a safepoint or handshake. It
// receives an extra argument compared to ThreadBlockInVM, the address of a pointer to the monitor we
// are trying to acquire. This will be used to access and release the monitor if needed to avoid
// said deadlocks.
// It works like ThreadBlockInVM but differs from it in two ways:
// - When transitioning in (constructor), it checks for safepoints without blocking, i.e., calls
// back if needed to allow a pending safepoint to continue but does not block in it.
// - When transitioning back (destructor), if there is a pending safepoint or handshake it releases
// the monitor that is only partially acquired.
class ThreadBlockInVMWithDeadlockCheck : public ThreadStateTransition {
private:
// Address of the caller's pointer to the monitor being acquired; set to
// NULL by release_monitor() once the monitor has been released.
Monitor** _in_flight_monitor_adr;
// Release the partially-acquired monitor (if still held) so a pending
// safepoint or handshake cannot deadlock on it, then clear the slot.
void release_monitor() {
assert(_in_flight_monitor_adr != NULL, "_in_flight_monitor_adr should have been set on constructor");
Monitor* in_flight_monitor = *_in_flight_monitor_adr;
if (in_flight_monitor != NULL) {
in_flight_monitor->release_for_safepoint();
*_in_flight_monitor_adr = NULL;
}
}
public:
// Transition _thread_in_vm -> _thread_blocked, calling back for a pending
// safepoint without blocking in it (see class comment above).
ThreadBlockInVMWithDeadlockCheck(JavaThread* thread, Monitor** in_flight_monitor_adr)
: ThreadStateTransition(thread), _in_flight_monitor_adr(in_flight_monitor_adr) {
// Once we are blocked vm expects stack to be walkable
thread->frame_anchor()->make_walkable(thread);
// Transitional state (_thread_in_vm + 1 == _thread_in_vm_trans) must be
// published before checking for a safepoint.
thread->set_thread_state((JavaThreadState)(_thread_in_vm + 1));
InterfaceSupport::serialize_thread_state_with_handler(thread);
// Call back (without blocking) so a pending safepoint can proceed.
SafepointMechanism::callback_if_safepoint(thread);
thread->set_thread_state(_thread_blocked);
CHECK_UNHANDLED_OOPS_ONLY(_thread->clear_unhandled_oops();)
}
// Transition _thread_blocked -> _thread_in_vm; if a safepoint/handshake is
// pending, first release the in-flight monitor to avoid deadlock.
~ThreadBlockInVMWithDeadlockCheck() {
// Change to transition state
_thread->set_thread_state((JavaThreadState)(_thread_blocked + 1));
InterfaceSupport::serialize_thread_state_with_handler(_thread);
if (SafepointMechanism::should_block(_thread)) {
release_monitor();
SafepointMechanism::callback_if_safepoint(_thread);
// The VMThread might have read that we were in a _thread_blocked state
// and proceeded to process a handshake for us. If that's the case then
// we need to block.
// By doing this we are also making the current thread process its own
// handshake if there is one pending and the VMThread didn't try to process
// it yet. This is more of a side-effect and not really necessary; the
// handshake could be processed later on.
if (_thread->has_handshake()) {
_thread->handshake_process_by_self();
}
}
_thread->set_thread_state(_thread_in_vm);
CHECK_UNHANDLED_OOPS_ONLY(_thread->clear_unhandled_oops();)
}
};
// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.

File diff suppressed because it is too large Load Diff

View File

@ -29,50 +29,10 @@
#include "runtime/os.hpp"
#include "utilities/histogram.hpp"
// The SplitWord construct allows us to colocate the contention queue
// (cxq) with the lock-byte. The queue elements are ParkEvents, which are
// always aligned on 256-byte addresses - the least significant byte of
// a ParkEvent is always 0. Colocating the lock-byte with the queue
// allows us to easily avoid what would otherwise be a race in lock()
// if we were to use two completely separate fields for the contention queue
// and the lock indicator. Specifically, colocation renders us immune
// from the race where a thread might enqueue itself in the lock() slow-path
// immediately after the lock holder drops the outer lock in the unlock()
// fast-path.
//
// Colocation allows us to use a fast-path unlock() form that uses
// A MEMBAR instead of a CAS. MEMBAR has lower local latency than CAS
// on many platforms.
//
// See:
// + http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
// + http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//
// Note that we're *not* using word-tearing the classic sense.
// The lock() fast-path will CAS the lockword and the unlock()
// fast-path will store into the lock-byte colocated within the lockword.
// We depend on the fact that all our reference platforms have
// coherent and atomic byte accesses. More precisely, byte stores
// interoperate in a safe, sane, and expected manner with respect to
// CAS, ST and LDs to the full-word containing the byte.
// If you're porting HotSpot to a platform where that isn't the case
// then you'll want change the unlock() fast path from:
// STB;MEMBAR #storeload; LDN
// to a full-word CAS of the lockword.
union SplitWord { // full-word with separately addressable LSB
volatile intptr_t FullWord ;
volatile void * Address ;
volatile jbyte Bytes [sizeof(intptr_t)] ;
} ;
class ParkEvent ;
// See orderAccess.hpp. We assume throughout the VM that mutex lock and
// try_lock do fence-lock-acquire, and that unlock does a release-unlock,
// *in that order*. If their implementations change such that these
// assumptions are violated, a whole lot of code will break.
// A Mutex/Monitor is a simple wrapper around a native lock plus condition
// variable that supports lock ownership tracking, lock ranking for deadlock
// detection and coordinates with the safepoint protocol.
// The default length of monitor name was originally chosen to be 64 to avoid
// false sharing. Now, PaddedMonitor is available for this purpose.
@ -118,22 +78,10 @@ class Monitor : public CHeapObj<mtInternal> {
native = max_nonleaf + 1
};
// The WaitSet and EntryList linked lists are composed of ParkEvents.
// I use ParkEvent instead of threads as ParkEvents are immortal and
// type-stable, meaning we can safely unpark() a possibly stale
// list element in the unlock()-path.
protected: // Monitor-Mutex metadata
SplitWord _LockWord ; // Contention queue (cxq) colocated with Lock-byte
Thread * volatile _owner; // The owner of the lock
// Consider sequestering _owner on its own $line
// to aid future synchronization mechanisms.
ParkEvent * volatile _EntryList ; // List of threads waiting for entry
ParkEvent * volatile _OnDeck ; // heir-presumptive
volatile intptr_t _WaitLock [1] ; // Protects _WaitSet
ParkEvent * volatile _WaitSet ; // LL of ParkEvents
volatile bool _snuck; // Used for sneaky locking (evil).
char _name[MONITOR_NAME_LEN]; // Name of mutex
os::PlatformMonitor _lock; // Native monitor implementation
char _name[MONITOR_NAME_LEN]; // Name of mutex/monitor
// Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
@ -149,8 +97,8 @@ class Monitor : public CHeapObj<mtInternal> {
void set_owner_implementation(Thread* owner) PRODUCT_RETURN;
void check_prelock_state (Thread* thread, bool safepoint_check) PRODUCT_RETURN;
void check_block_state (Thread* thread) PRODUCT_RETURN;
void assert_owner (Thread* expected) NOT_DEBUG_RETURN;
// platform-dependent support code can go here (in os_<os_family>.cpp)
public:
enum {
_no_safepoint_check_flag = true,
@ -164,6 +112,9 @@ class Monitor : public CHeapObj<mtInternal> {
// consistent checking for each lock.
// A few existing locks will sometimes have a safepoint check and
// sometimes not, but these locks are set up in such a way to avoid deadlocks.
// Note: monitors that may be shared between JavaThreads and the VMThread
// should never encounter a safepoint check whilst they are held, else a
// deadlock with the VMThread can occur.
enum SafepointCheckRequired {
_safepoint_check_never, // Monitors with this value will cause errors
// when acquired with a safepoint check.
@ -176,22 +127,6 @@ class Monitor : public CHeapObj<mtInternal> {
NOT_PRODUCT(SafepointCheckRequired _safepoint_check_required;)
enum WaitResults {
CONDVAR_EVENT, // Wait returned because of condition variable notification
INTERRUPT_EVENT, // Wait returned because waiting thread was interrupted
NUMBER_WAIT_RESULTS
};
private:
int TrySpin (Thread * Self) ;
int TryLock () ;
int TryFast () ;
int AcquireOrPush (ParkEvent * ev) ;
void IUnlock (bool RelaxAssert) ;
void ILock (Thread * Self) ;
int IWait (Thread * Self, jlong timo);
int ILocked () ;
protected:
static void ClearMonitor (Monitor * m, const char* name = NULL) ;
Monitor() ;
@ -208,8 +143,8 @@ class Monitor : public CHeapObj<mtInternal> {
bool wait(bool no_safepoint_check = !_no_safepoint_check_flag,
long timeout = 0,
bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
bool notify();
bool notify_all();
void notify();
void notify_all();
void lock(); // prints out warning if VM thread blocks
@ -219,6 +154,8 @@ class Monitor : public CHeapObj<mtInternal> {
bool try_lock(); // Like lock(), but unblocking. It returns false instead
void release_for_safepoint();
// Lock without safepoint check. Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.
void lock_without_safepoint_check();
@ -290,9 +227,6 @@ class PaddedMonitor : public Monitor {
// there may have been some benefit to having distinct mutexes and monitors, but that time
// has past.
//
// The Mutex/Monitor design parallels that of Java-monitors, being based on
// thread-specific park-unpark platform-specific primitives.
class Mutex : public Monitor { // degenerate Monitor
public:
@ -300,8 +234,8 @@ class Mutex : public Monitor { // degenerate Monitor
SafepointCheckRequired safepoint_check_required = _safepoint_check_always);
// default destructor
private:
bool notify () { ShouldNotReachHere(); return false; }
bool notify_all() { ShouldNotReachHere(); return false; }
void notify () { ShouldNotReachHere(); }
void notify_all() { ShouldNotReachHere(); }
bool wait (bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
ShouldNotReachHere() ;
return false ;

View File

@ -272,18 +272,16 @@ class MonitorLockerEx: public MutexLockerEx {
return false;
}
bool notify_all() {
void notify_all() {
if (_monitor != NULL) {
return _monitor->notify_all();
_monitor->notify_all();
}
return true;
}
bool notify() {
void notify() {
if (_monitor != NULL) {
return _monitor->notify();
_monitor->notify();
}
return true;
}
};

View File

@ -793,7 +793,7 @@ void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, Ja
// -------------------------------------------------------------------------------------------------------
// Implementation of Safepoint callback point
void SafepointSynchronize::block(JavaThread *thread) {
void SafepointSynchronize::block(JavaThread *thread, bool block_in_safepoint_check) {
assert(thread != NULL, "thread must be set");
assert(thread->is_Java_thread(), "not a Java thread");
@ -848,28 +848,37 @@ void SafepointSynchronize::block(JavaThread *thread) {
}
}
// We transition the thread to state _thread_blocked here, but
// we can't do our usual check for external suspension and then
// self-suspend after the lock_without_safepoint_check() call
// below because we are often called during transitions while
// we hold different locks. That would leave us suspended while
// holding a resource which results in deadlocks.
thread->set_thread_state(_thread_blocked);
Safepoint_lock->unlock();
if (block_in_safepoint_check) {
// We transition the thread to state _thread_blocked here, but
// we can't do our usual check for external suspension and then
// self-suspend after the lock_without_safepoint_check() call
// below because we are often called during transitions while
// we hold different locks. That would leave us suspended while
// holding a resource which results in deadlocks.
thread->set_thread_state(_thread_blocked);
Safepoint_lock->unlock();
// We now try to acquire the threads lock. Since this lock is hold by the VM thread during
// the entire safepoint, the threads will all line up here during the safepoint.
Threads_lock->lock_without_safepoint_check();
// restore original state. This is important if the thread comes from compiled code, so it
// will continue to execute with the _thread_in_Java state.
thread->set_thread_state(state);
Threads_lock->unlock();
// We now try to acquire the threads lock. Since this lock is hold by the VM thread during
// the entire safepoint, the threads will all line up here during the safepoint.
Threads_lock->lock_without_safepoint_check();
// restore original state. This is important if the thread comes from compiled code, so it
// will continue to execute with the _thread_in_Java state.
thread->set_thread_state(state);
Threads_lock->unlock();
} else {
// We choose not to block in this call since we would be
// caught when transitioning back anyways if the safepoint
// is still going on.
thread->set_thread_state(state);
Safepoint_lock->unlock();
}
break;
case _thread_in_native_trans:
case _thread_blocked_trans:
case _thread_new_trans:
if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back) {
if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back &&
block_in_safepoint_check) {
thread->print_thread_state();
fatal("Deadlock in safepoint code. "
"Should have called back to the VM before blocking.");

View File

@ -143,7 +143,7 @@ private:
}
// Called when a thread voluntarily blocks
static void block(JavaThread *thread);
static void block(JavaThread *thread, bool block_in_safepoint_check = true);
friend class SafepointMechanism;

View File

@ -72,12 +72,15 @@ public:
#endif
}
// Call this method to see if this thread should block for a safepoint.
// Call this method to see if this thread should block for a safepoint or process handshake.
static inline bool should_block(Thread* thread);
// Blocks a thread until safepoint is completed
// Blocks a thread until safepoint/handshake is completed.
static inline void block_if_requested(JavaThread* thread);
// Calls back if there is a pending safepoint but does not block for it.
static inline void callback_if_safepoint(JavaThread* thread);
// Caller is responsible for using a memory barrier if needed.
static inline void arm_local_poll(JavaThread* thread);
static inline void disarm_local_poll(JavaThread* thread);

View File

@ -62,6 +62,20 @@ void SafepointMechanism::block_if_requested(JavaThread *thread) {
block_if_requested_slow(thread);
}
// Call back into the safepoint machinery for a pending safepoint without
// blocking in it (block() is invoked with block_in_safepoint_check == false).
void SafepointMechanism::callback_if_safepoint(JavaThread* thread) {
  // If using thread local polls, we should not check the
  // global_poll() and callback via block() if the VMThread
  // has not yet armed the local poll. Otherwise, when used in
  // combination with should_block(), the latter could miss
  // detecting the same safepoint that this method would detect
  // if only checking global polls.
  if (uses_thread_local_poll() && !local_poll_armed(thread)) {
    return;
  }
  if (global_poll()) {
    SafepointSynchronize::block(thread, false);
  }
}
void SafepointMechanism::arm_local_poll(JavaThread* thread) {
thread->set_polling_page(poll_armed_value());
}

View File

@ -294,7 +294,6 @@ Thread::Thread() {
// and ::Release()
_ParkEvent = ParkEvent::Allocate(this);
_SleepEvent = ParkEvent::Allocate(this);
_MutexEvent = ParkEvent::Allocate(this);
_MuxEvent = ParkEvent::Allocate(this);
#ifdef CHECK_UNHANDLED_OOPS
@ -460,7 +459,6 @@ Thread::~Thread() {
// We NULL out the fields for good hygiene.
ParkEvent::Release(_ParkEvent); _ParkEvent = NULL;
ParkEvent::Release(_SleepEvent); _SleepEvent = NULL;
ParkEvent::Release(_MutexEvent); _MutexEvent = NULL;
ParkEvent::Release(_MuxEvent); _MuxEvent = NULL;
delete handle_area();

View File

@ -782,7 +782,6 @@ protected:
volatile int _TypeTag;
ParkEvent * _ParkEvent; // for synchronized()
ParkEvent * _SleepEvent; // for Thread.sleep
ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
int NativeSyncRecursion; // diagnostic
@ -792,8 +791,6 @@ protected:
jint _hashStateY;
jint _hashStateZ;
volatile jint rng[4]; // RNG for spin loop
// Low-level leaf-lock primitives used to implement synchronization
// and native monitor-mutex infrastructure.
// Not for general synchronization use.