8163210: java/util/concurrent/tck/JSR166TestCase.java testWriteAfterReadLock(StampedLockTest): timed out waiting for thread to terminate

Reviewed-by: martin, psandoz, dholmes
This commit is contained in:
Doug Lea 2016-08-15 09:09:00 -07:00
parent cdae608a00
commit 0f24b72e4e
3 changed files with 664 additions and 614 deletions

View File

@ -263,6 +263,47 @@ public class StampedLock implements java.io.Serializable {
* is theoretically possible, so we additionally add a
* storeStoreFence after lock acquisition CAS.
*
* ----------------------------------------------------------------
* Here's an informal proof that plain reads by _successful_
* readers see plain writes from preceding but not following
* writers (following Boehm and the C++ standard [atomics.fences]):
*
* Because of the total synchronization order of accesses to
* volatile long state containing the sequence number, writers and
* _successful_ readers can be globally sequenced.
*
* int x, y;
*
* Writer 1:
* inc sequence (odd - "locked")
* storeStoreFence();
* x = 1; y = 2;
* inc sequence (even - "unlocked")
*
* Successful Reader:
* read sequence (even)
* // must see writes from Writer 1 but not Writer 2
* r1 = x; r2 = y;
* acquireFence();
* read sequence (even - validated unchanged)
* // use r1 and r2
*
* Writer 2:
* inc sequence (odd - "locked")
* storeStoreFence();
* x = 3; y = 4;
* inc sequence (even - "unlocked")
*
* Visibility of writer 1's stores is normal - reader's initial
* read of state synchronizes with writer 1's final write to state.
* Lack of visibility of writer 2's plain writes is less obvious.
* If reader's read of x or y saw writer 2's write, then (assuming
* semantics of C++ fences) the storeStoreFence would "synchronize"
* with reader's acquireFence and reader's validation read must see
* writer 2's initial write to state and so validation must fail.
* But making this "proof" formal and rigorous is an open problem!
* ----------------------------------------------------------------
*
* The memory layout keeps lock state and queue pointers together
* (normally on the same cache line). This usually works well for
* read-mostly loads. In most other cases, the natural tendency of
@ -276,14 +317,14 @@ public class StampedLock implements java.io.Serializable {
/** Number of processors, for spin control */
private static final int NCPU = Runtime.getRuntime().availableProcessors();
/** Maximum number of retries before enqueuing on acquisition */
private static final int SPINS = (NCPU > 1) ? 1 << 6 : 0;
/** Maximum number of retries before enqueuing on acquisition; at least 1 */
private static final int SPINS = (NCPU > 1) ? 1 << 6 : 1;
/** Maximum number of retries before blocking at head on acquisition */
private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 0;
/** Maximum number of tries before blocking at head on acquisition */
private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 1;
/** Maximum number of retries before re-blocking */
private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 0;
private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 1;
/** The period for yielding when waiting for overflow spinlock */
private static final int OVERFLOW_YIELD_RATE = 7; // must be a power of 2, minus 1
@ -1228,6 +1269,11 @@ public class StampedLock implements java.io.Serializable {
WCOWAIT.compareAndSet(h, c, c.cowait) &&
(w = c.thread) != null) // help release
LockSupport.unpark(w);
if (Thread.interrupted()) {
if (interruptible)
return cancelWaiter(node, p, true);
wasInterrupted = true;
}
if (h == (pp = p.prev) || h == p || pp == null) {
long m, s, ns;
do {
@ -1264,11 +1310,6 @@ public class StampedLock implements java.io.Serializable {
LockSupport.parkNanos(this, time);
}
node.thread = null;
if (Thread.interrupted()) {
if (interruptible)
return cancelWaiter(node, p, true);
wasInterrupted = true;
}
}
}
}

View File

@ -68,6 +68,7 @@ import java.security.ProtectionDomain;
import java.security.SecurityPermission;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.Iterator;
@ -89,6 +90,7 @@ import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.Semaphore;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
@ -1278,7 +1280,7 @@ public class JSR166TestCase extends TestCase {
* thread to enter a wait state: BLOCKED, WAITING, or TIMED_WAITING.
*/
void waitForThreadToEnterWaitState(Thread thread, long timeoutMillis) {
long startTime = System.nanoTime();
long startTime = 0L;
for (;;) {
Thread.State s = thread.getState();
if (s == Thread.State.BLOCKED ||
@ -1287,6 +1289,8 @@ public class JSR166TestCase extends TestCase {
return;
else if (s == Thread.State.TERMINATED)
fail("Unexpected thread termination");
else if (startTime == 0L)
startTime = System.nanoTime();
else if (millisElapsedSince(startTime) > timeoutMillis) {
threadAssertTrue(thread.isAlive());
return;
@ -1900,4 +1904,7 @@ public class JSR166TestCase extends TestCase {
1000L, MILLISECONDS,
new SynchronousQueue<Runnable>());
static <T> void shuffle(T[] array) {
    // Uniformly permute {@code array} in place (Fisher-Yates), drawing
    // randomness from the current thread's local generator so concurrent
    // test runs never contend on a shared Random instance.
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    for (int i = array.length - 1; i > 0; i--) {
        int j = rnd.nextInt(i + 1);   // uniform in [0, i]
        T tmp = array[i];
        array[i] = array[j];
        array[j] = tmp;
    }
}
}

File diff suppressed because it is too large Load Diff