mirror of https://github.com/openjdk/jdk.git
synced 2026-02-10 18:38:27 +00:00
Merge commit fb8ceae0a7
@@ -3511,6 +3511,13 @@ encode %{
// Load markWord from object into displaced_header.
__ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
__ load_klass(tmp, oop);
__ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
__ tstw(tmp, JVM_ACC_IS_BOX_CLASS);
__ br(Assembler::NE, cont);
}

if (UseBiasedLocking && !UseOptoBiasInlining) {
__ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -73,11 +73,18 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// save object being locked into the BasicObjectLock
str(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

null_check_offset = offset();

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(hdr, obj);
ldrw(hdr, Address(hdr, Klass::access_flags_offset()));
tstw(hdr, JVM_ACC_IS_BOX_CLASS);
br(Assembler::NE, slow_case);
}

if (UseBiasedLocking) {
assert(scratch != noreg, "should have scratch register at this point");
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
} else {
null_check_offset = offset();
biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
}

// Load object header

@@ -725,6 +725,13 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
// Load object pointer into obj_reg %c_rarg3
ldr(obj_reg, Address(lock_reg, obj_offset));

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(tmp, obj_reg);
ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
tstw(tmp, JVM_ACC_IS_BOX_CLASS);
br(Assembler::NE, slow_case);
}

if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
}
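The same box-class guard recurs in every platform hunk below (arm, ppc, s390, x86): load the klass of the object being locked, test JVM_ACC_IS_BOX_CLASS in its access flags, and bail out to the slow path (or C2's done/cont label) if it is set. As a hedged C++ sketch of what the emitted assembly amounts to — the helper itself is illustrative and not part of the patch:

  // Illustrative only: mirrors the generated code, not a function in this commit.
  static bool diagnose_box_sync_slow_path(oop obj) {
    if (DiagnoseSyncOnPrimitiveWrappers != 0) {                    // flag introduced by this change
      Klass* k = obj->klass();                                     // load_klass(tmp, obj)
      if (k->access_flags().as_int() & JVM_ACC_IS_BOX_CLASS) {     // ldrw/tstw on Klass::access_flags_offset()
        return true;   // take slow_case so the runtime can diagnose the sync-on-box attempt
      }
    }
    return false;      // fall through to the normal fast-lock path
  }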
@@ -444,14 +444,14 @@ void MacroAssembler::reserved_stack_check() {
bind(no_reserved_zone_enabling);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
Register tmp_reg,
bool swap_reg_contains_mark,
Label& done,
Label* slow_case,
BiasedLockingCounters* counters) {
void MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
Register tmp_reg,
bool swap_reg_contains_mark,
Label& done,
Label* slow_case,
BiasedLockingCounters* counters) {
assert(UseBiasedLocking, "why call this otherwise?");
assert_different_registers(lock_reg, obj_reg, swap_reg);

@@ -471,9 +471,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// pointers to allow age to be placed into low bits
// First check to see whether biasing is even enabled for this object
Label cas_label;
int null_check_offset = -1;
if (!swap_reg_contains_mark) {
null_check_offset = offset();
ldr(swap_reg, mark_addr);
}
andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);

@@ -601,8 +599,6 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
}

bind(cas_label);

return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {

@@ -111,15 +111,11 @@ class MacroAssembler: public Assembler {
// tmp_reg must be supplied and must not be rscratch1 or rscratch2
// Optional slow case is for implementations (interpreter and C1) which branch to
// slow case directly. Leaves condition codes set for C2's Fast_Lock node.
// Returns offset of first potentially-faulting instruction for null
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
int biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -200,26 +200,29 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

str(obj, Address(disp_hdr, obj_offset));

null_check_offset = offset();

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(tmp1, obj);
ldr_u32(tmp1, Address(tmp1, Klass::access_flags_offset()));
tst(tmp1, JVM_ACC_IS_BOX_CLASS);
b(slow_case, ne);
}

if (UseBiasedLocking) {
// load object
str(obj, Address(disp_hdr, obj_offset));
null_check_offset = biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
}

assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

if (!UseBiasedLocking) {
null_check_offset = offset();
}

// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
// That would be acceptable as ether CAS or slow case path is taken in that case.

// Must be the first instruction here, because implicit null check relies on it
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

str(obj, Address(disp_hdr, obj_offset));
tst(hdr, markWord::unlocked_value);
b(fast_lock, ne);
@@ -90,6 +90,13 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc

Label fast_lock, done;

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(Rscratch, Roop);
ldr_u32(Rscratch, Address(Rscratch, Klass::access_flags_offset()));
tst(Rscratch, JVM_ACC_IS_BOX_CLASS);
b(done, ne);
}

if (UseBiasedLocking && !UseOptoBiasInlining) {
assert(scratch3 != noreg, "need extra temporary for -XX:-UseOptoBiasInlining");
biased_locking_enter(Roop, Rmark, Rscratch, false, scratch3, done, done);

@@ -883,6 +883,13 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
// Load object pointer
ldr(Robj, Address(Rlock, obj_offset));

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(R0, Robj);
ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
tst(R0, JVM_ACC_IS_BOX_CLASS);
b(slow_case, ne);
}

if (UseBiasedLocking) {
biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
}
@@ -1322,11 +1322,11 @@ void MacroAssembler::biased_locking_enter_with_cas(Register obj_reg, Register ol
#endif // !PRODUCT
}

int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Register tmp2,
Label& done, Label& slow_case,
BiasedLockingCounters* counters) {
void MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Register tmp2,
Label& done, Label& slow_case,
BiasedLockingCounters* counters) {
// obj_reg must be preserved (at least) if the bias locking fails
// tmp_reg is a temporary register
// swap_reg was used as a temporary but contained a value

@@ -1357,10 +1357,6 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// First check to see whether biasing is even enabled for this object
Label cas_label;

// The null check applies to the mark loading, if we need to load it.
// If the mark has already been loaded in swap_reg then it has already
// been performed and the offset is irrelevant.
int null_check_offset = offset();
if (!swap_reg_contains_mark) {
ldr(swap_reg, mark_addr);
}

@@ -1504,8 +1500,6 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// removing the bias bit from the object's header.

bind(cas_label);

return null_check_offset;
}

@@ -375,18 +375,14 @@ public:
// biased and we acquired it. Slow case label is branched to with
// condition code NE set if the lock is biased but we failed to acquire
// it. Otherwise fall through.
// Returns offset of first potentially-faulting instruction for null
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
// Notes:
// - swap_reg and tmp_reg are scratched
// - Rtemp was (implicitly) scratched and can now be specified as the tmp2
int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Register tmp2,
Label& done, Label& slow_case,
BiasedLockingCounters* counters = NULL);
void biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Register tmp2,
Label& done, Label& slow_case,
BiasedLockingCounters* counters = NULL);
void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

// Building block for CAS cases of biased locking: makes CAS and records statistics.
@@ -105,6 +105,13 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// Save object being locked into the BasicObjectLock...
std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(Rscratch, Roop);
lwz(Rscratch, in_bytes(Klass::access_flags_offset()), Rscratch);
testbitdi(CCR0, R0, Rscratch, exact_log2(JVM_ACC_IS_BOX_CLASS));
bne(CCR0, slow_int);
}

if (UseBiasedLocking) {
biased_locking_enter(CCR0, Roop, Rmark, Rscratch, R0, done, &slow_int);
}

@@ -910,6 +910,13 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(tmp, object);
lwz(tmp, in_bytes(Klass::access_flags_offset()), tmp);
testbitdi(CCR0, R0, tmp, exact_log2(JVM_ACC_IS_BOX_CLASS));
bne(CCR0, slow_case);
}

if (UseBiasedLocking) {
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
}
@@ -2836,6 +2836,12 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(temp, oop);
lwz(temp, in_bytes(Klass::access_flags_offset()), temp);
testbitdi(flag, R0, temp, exact_log2(JVM_ACC_IS_BOX_CLASS));
bne(flag, cont);
}

if (try_bias) {
biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
@@ -91,6 +91,12 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
// Save object being locked into the BasicObjectLock...
z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(Z_R1_scratch, obj);
testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_BOX_CLASS));
z_btrue(slow_case);
}

if (UseBiasedLocking) {
biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
}

@@ -1000,6 +1000,12 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// Load markWord from object into displaced_header.
z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(Z_R1_scratch, object);
testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_BOX_CLASS));
z_btrue(slow_case);
}

if (UseBiasedLocking) {
biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
}
@@ -3358,6 +3358,14 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// Load markWord from oop into mark.
z_lg(displacedHeader, 0, oop);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(Z_R1_scratch, oop);
z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset()));
assert((JVM_ACC_IS_BOX_CLASS & 0xFFFF) == 0, "or change following instruction");
z_nilh(Z_R1_scratch, JVM_ACC_IS_BOX_CLASS >> 16);
z_brne(done);
}

if (try_bias) {
biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -39,6 +39,7 @@
#include "runtime/stubRoutines.hpp"

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");

@@ -51,12 +52,18 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// save object being locked into the BasicObjectLock
movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

null_check_offset = offset();

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(hdr, obj, rklass_decode_tmp);
movl(hdr, Address(hdr, Klass::access_flags_offset()));
testl(hdr, JVM_ACC_IS_BOX_CLASS);
jcc(Assembler::notZero, slow_case);
}

if (UseBiasedLocking) {
assert(scratch != noreg, "should have scratch register at this point");
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);
} else {
null_check_offset = offset();
biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);
}

// Load object header
@@ -470,6 +470,13 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp

Label IsInflated, DONE_LABEL;

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(tmpReg, objReg, cx1Reg);
movl(tmpReg, Address(tmpReg, Klass::access_flags_offset()));
testl(tmpReg, JVM_ACC_IS_BOX_CLASS);
jcc(Assembler::notZero, DONE_LABEL);
}

// it's stack-locked, biased or neutral
// TODO: optimize away redundant LDs of obj->mark and improve the markword triage
// order to reduce the number of conditional branches in the most common cases.

@@ -1186,6 +1186,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
// problematic case where tmp_reg = no_reg.
const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);

const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();

@@ -1197,8 +1198,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// Load object pointer into obj_reg
movptr(obj_reg, Address(lock_reg, obj_offset));

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
testl(tmp_reg, JVM_ACC_IS_BOX_CLASS);
jcc(Assembler::notZero, slow_case);
}

if (UseBiasedLocking) {
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
}
@@ -1081,15 +1081,15 @@ void MacroAssembler::reserved_stack_check() {
bind(no_reserved_zone_enabling);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
Register tmp_reg,
Register tmp_reg2,
bool swap_reg_contains_mark,
Label& done,
Label* slow_case,
BiasedLockingCounters* counters) {
void MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
Register tmp_reg,
Register tmp_reg2,
bool swap_reg_contains_mark,
Label& done,
Label* slow_case,
BiasedLockingCounters* counters) {
assert(UseBiasedLocking, "why call this otherwise?");
assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
assert(tmp_reg != noreg, "tmp_reg must be supplied");

@@ -1108,9 +1108,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// pointers to allow age to be placed into low bits
// First check to see whether biasing is even enabled for this object
Label cas_label;
int null_check_offset = -1;
if (!swap_reg_contains_mark) {
null_check_offset = offset();
movptr(swap_reg, mark_addr);
}
movptr(tmp_reg, swap_reg);

@@ -1127,9 +1125,6 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// simpler.
movptr(saved_mark_addr, swap_reg);
#endif
if (swap_reg_contains_mark) {
null_check_offset = offset();
}
load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
#ifdef _LP64
orptr(tmp_reg, r15_thread);

@@ -1263,8 +1258,6 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
}

bind(cas_label);

return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {

@@ -661,15 +661,11 @@ class MacroAssembler: public Assembler {
// allocate a temporary (inefficient, avoid if possible).
// Optional slow case is for implementations (interpreter and C1) which branch to
// slow case directly. Leaves condition codes set for C2's Fast_Lock node.
// Returns offset of first potentially-faulting instruction for null
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
int biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
Register tmp_reg2, bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
Register tmp_reg2, bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);

Condition negate_condition(Condition cond);
@@ -40,6 +40,8 @@
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

@@ -165,6 +167,13 @@ void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
loaders_processed, walk_previous_versions ? "walk_previous_versions" : "");
}

void ClassLoaderDataGraph::safepoint_and_clean_metaspaces() {
// Safepoint and mark all metadata with MetadataOnStackMark and then deallocate unused bits of metaspace.
// This needs to be exclusive to Redefinition, so needs to be a safepoint.
VM_CleanClassLoaderDataMetaspaces op;
VMThread::execute(&op);
}

void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() {
assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");

@@ -497,9 +506,6 @@ bool ClassLoaderDataGraph::is_valid(ClassLoaderData* loader_data) {
bool ClassLoaderDataGraph::do_unloading() {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);

// Indicate whether safepoint cleanup is needed.
_safepoint_cleanup_needed = true;

ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
bool seen_dead_loader = false;

@@ -560,7 +566,7 @@ void ClassLoaderDataGraph::clean_module_and_package_info() {
}
}

void ClassLoaderDataGraph::purge() {
void ClassLoaderDataGraph::purge(bool at_safepoint) {
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;

@@ -576,6 +582,21 @@ void ClassLoaderDataGraph::purge() {
set_metaspace_oom(false);
}
DependencyContext::purge_dependency_contexts();

// If we're purging metadata at a safepoint, clean remaining
// metaspaces if we need to.
if (at_safepoint) {
_safepoint_cleanup_needed = true; // tested and reset next.
if (should_clean_metaspaces_and_reset()) {
walk_metadata_and_clean_metaspaces();
}
} else {
// Tell service thread this is a good time to check to see if we should
// clean loaded CLDGs. This causes another safepoint.
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
_safepoint_cleanup_needed = true;
Service_lock->notify_all();
}
}

int ClassLoaderDataGraph::resize_dictionaries() {
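The new purge() parameter changes the contract for callers; both call sites touched later in this commit pass true because they already run inside a safepoint. A hedged usage sketch (the second call is hypothetical, only the first appears in the patch):

  ClassLoaderDataGraph::purge(/*at_safepoint*/true);   // e.g. full-GC / remark paths: may clean metaspaces directly
  ClassLoaderDataGraph::purge(/*at_safepoint*/false);  // hypothetical non-safepoint caller: defers cleanup to the service thread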
@@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* find_or_create(Handle class_loader);
static ClassLoaderData* add(Handle class_loader, bool has_class_mirror_holder);
static void clean_module_and_package_info();
static void purge();
static void purge(bool at_safepoint);
static void clear_claimed_marks();
static void clear_claimed_marks(int claim);
// Iteration through CLDG inside a safepoint; GC support

@@ -89,10 +89,12 @@ class ClassLoaderDataGraph : public AllStatic {
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading();

// Expose state to avoid logging overhead in safepoint cleanup tasks.
static inline bool should_clean_metaspaces_and_reset();
static void set_should_clean_deallocate_lists() { _should_clean_deallocate_lists = true; }
static void clean_deallocate_lists(bool purge_previous_versions);
// Called from ServiceThread
static void safepoint_and_clean_metaspaces();
// Called from VMOperation
static void walk_metadata_and_clean_metaspaces();

// dictionary do
@@ -2184,6 +2184,14 @@ void SystemDictionary::resolve_well_known_classes(TRAPS) {
//_box_klasses[T_OBJECT] = WK_KLASS(object_klass);
//_box_klasses[T_ARRAY] = WK_KLASS(object_klass);

if (DiagnoseSyncOnPrimitiveWrappers != 0) {
for (int i = T_BOOLEAN; i < T_LONG + 1; i++) {
assert(_box_klasses[i] != NULL, "NULL box class");
_box_klasses[i]->set_is_box();
_box_klasses[i]->set_prototype_header(markWord::prototype());
}
}

#ifdef ASSERT
if (UseSharedSpaces) {
JVMTI_ONLY(assert(JvmtiExport::is_early_phase(),
@@ -1201,8 +1201,10 @@ void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassF
Arguments::assert_is_dumping_archive();
assert(!is_builtin(k), "must be unregistered class");
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->_clsfile_size = cfs->length();
info->_clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
if (info != NULL) {
info->_clsfile_size = cfs->length();
info->_clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
}
}

void SystemDictionaryShared::init_dumptime_info(InstanceKlass* k) {

@@ -1398,12 +1400,16 @@ void SystemDictionaryShared::check_excluded_classes() {
bool SystemDictionaryShared::is_excluded_class(InstanceKlass* k) {
assert(_no_class_loading_should_happen, "sanity");
Arguments::assert_is_dumping_archive();
return find_or_allocate_info_for(k)->is_excluded();
DumpTimeSharedClassInfo* p = find_or_allocate_info_for(k);
return (p == NULL) ? true : p->is_excluded();
}

void SystemDictionaryShared::set_class_has_failed_verification(InstanceKlass* ik) {
Arguments::assert_is_dumping_archive();
find_or_allocate_info_for(ik)->set_failed_verification();
DumpTimeSharedClassInfo* p = find_or_allocate_info_for(ik);
if (p != NULL) {
p->set_failed_verification();
}
}

bool SystemDictionaryShared::has_class_failed_verification(InstanceKlass* ik) {

@@ -1440,9 +1446,12 @@ bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbo
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
Arguments::assert_is_dumping_archive();
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->add_verification_constraint(k, name, from_name, from_field_is_protected,
from_is_array, from_is_object);

if (info != NULL) {
info->add_verification_constraint(k, name, from_name, from_field_is_protected,
from_is_array, from_is_object);
} else {
return true;
}
if (DynamicDumpSharedSpaces) {
// For dynamic dumping, we can resolve all the constraint classes for all class loaders during
// the initial run prior to creating the archive before vm exit. We will also perform verification

@@ -1771,7 +1780,9 @@ void SystemDictionaryShared::record_linking_constraint(Symbol* name, InstanceKla
}
Arguments::assert_is_dumping_archive();
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(klass);
info->record_linking_constraint(name, loader1, loader2);
if (info != NULL) {
info->record_linking_constraint(name, loader1, loader2);
}
}

// returns true IFF there's no need to re-initialize the i/v-tables for klass for
@@ -873,7 +873,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
if (result != NULL) {
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
add_allocated_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}

@@ -893,6 +893,9 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
return result;
}

@@ -1029,7 +1032,7 @@ void G1CollectedHeap::prepare_heap_for_mutators() {
hrm()->prepare_for_full_collection_end();

// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
MetaspaceUtils::verify_metrics();

// Prepare heap for normal collections.

@@ -1618,12 +1621,11 @@ jint G1CollectedHeap::initialize() {
// Create the hot card cache.
_hot_card_cache = new G1HotCardCache(this);

// Carve out the G1 part of the heap.
ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
// Create space mappers.
size_t page_size = actual_reserved_page_size(heap_rs);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
g1_rs.size(),
G1RegionToSpaceMapper::create_heap_mapper(heap_rs,
heap_rs.size(),
page_size,
HeapRegion::GrainBytes,
1,

@@ -1644,20 +1646,20 @@ jint G1CollectedHeap::initialize() {
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
G1RegionToSpaceMapper* bot_storage =
create_aux_memory_mapper("Block Offset Table",
G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
G1BlockOffsetTable::heap_map_factor());

G1RegionToSpaceMapper* cardtable_storage =
create_aux_memory_mapper("Card Table",
G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
G1CardTable::compute_size(heap_rs.size() / HeapWordSize),
G1CardTable::heap_map_factor());

G1RegionToSpaceMapper* card_counts_storage =
create_aux_memory_mapper("Card Counts Table",
G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
G1CardCounts::compute_size(heap_rs.size() / HeapWordSize),
G1CardCounts::heap_map_factor());

size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
G1RegionToSpaceMapper* prev_bitmap_storage =
create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
G1RegionToSpaceMapper* next_bitmap_storage =

@@ -1678,7 +1680,7 @@ jint G1CollectedHeap::initialize() {

// The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
// start within the first card.
guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
guarantee(heap_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
// Also create a G1 rem set.
_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
_rem_set->initialize(max_reserved_capacity(), max_regions());
@@ -1141,18 +1141,6 @@ public:
inline G1HeapRegionAttr region_attr(const void* obj) const;
inline G1HeapRegionAttr region_attr(uint idx) const;

// Return "TRUE" iff the given object address is in the reserved
// region of g1.
bool is_in_g1_reserved(const void* p) const {
return _hrm->reserved().contains(p);
}

// Returns a MemRegion that corresponds to the space that has been
// reserved for the heap
MemRegion g1_reserved() const {
return _hrm->reserved();
}

MemRegion reserved_region() const {
return _reserved;
}
@@ -87,18 +87,18 @@ inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
assert(addr != NULL, "invariant");
assert(is_in_g1_reserved((const void*) addr),
assert(is_in_reserved((const void*) addr),
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
p2i((void*)addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
return _hrm->addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
assert(addr != NULL, "invariant");
assert(is_in_g1_reserved((const void*) addr),
assert(is_in_reserved((const void*) addr),
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
p2i((void*)addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
uint const region_idx = addr_to_region(addr);
return region_at_or_null(region_idx);
}
@@ -443,7 +443,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
task_queue->initialize();
_task_queues->register_queue(i, task_queue);

_tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
_tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats);

_accum_task_vtime[i] = 0.0;
}

@@ -1143,7 +1143,7 @@ void G1ConcurrentMark::remark() {
// Clean out dead classes
if (ClassUnloadingWithConcurrentMark) {
GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
ClassLoaderDataGraph::purge();
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
}

_g1h->resize_heap_if_necessary();

@@ -1664,7 +1664,7 @@ public:

bool do_object_b(oop obj) {
return obj != NULL &&
(!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj));
(!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj));
}
};

@@ -1830,7 +1830,7 @@ G1ConcurrentMark::claim_region(uint worker_id) {
HeapWord* finger = _finger;

while (finger < _heap.end()) {
assert(_g1h->is_in_g1_reserved(finger), "invariant");
assert(_g1h->is_in_reserved(finger), "invariant");

HeapRegion* curr_region = _g1h->heap_region_containing(finger);
// Make sure that the reads below do not float before loading curr_region.

@@ -2814,15 +2814,14 @@ void G1CMTask::do_marking_step(double time_target_ms,
G1CMTask::G1CMTask(uint worker_id,
G1ConcurrentMark* cm,
G1CMTaskQueue* task_queue,
G1RegionMarkStats* mark_stats,
uint max_regions) :
G1RegionMarkStats* mark_stats) :
_objArray_processor(this),
_worker_id(worker_id),
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_next_mark_bitmap(NULL),
_task_queue(task_queue),
_mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
_mark_stats_cache(mark_stats, RegionMarkStatsCacheSize),
_calls(0),
_time_target_ms(0.0),
_start_time_ms(0.0),

@@ -2894,7 +2893,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
}

G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemRegion g1_reserved = g1h->g1_reserved();
MemRegion reserved = g1h->reserved_region();
double now = os::elapsedTime();

// Print the header of the output.

@@ -2902,7 +2901,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
G1PPRL_SUM_ADDR_FORMAT("reserved")
G1PPRL_SUM_BYTE_FORMAT("region-size"),
p2i(g1_reserved.start()), p2i(g1_reserved.end()),
p2i(reserved.start()), p2i(reserved.end()),
HeapRegion::GrainBytes);
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
@@ -818,8 +818,7 @@ public:
G1CMTask(uint worker_id,
G1ConcurrentMark *cm,
G1CMTaskQueue* task_queue,
G1RegionMarkStats* mark_stats,
uint max_regions);
G1RegionMarkStats* mark_stats);

inline void update_liveness(oop const obj, size_t const obj_size);
@@ -106,7 +106,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }

inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
assert(task_entry.is_array_slice() || _g1h->is_in_g1_reserved(task_entry.obj()), "invariant");
assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
_g1h->heap_region_containing(task_entry.obj())), "invariant");
assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant"); // FIXME!!!
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -29,11 +29,12 @@
#include "gc/g1/g1Trace.hpp"
#include "logging/log.hpp"

G1IHOPControl::G1IHOPControl(double initial_ihop_percent) :
G1IHOPControl::G1IHOPControl(double initial_ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
_initial_ihop_percent(initial_ihop_percent),
_target_occupancy(0),
_last_allocation_time_s(0.0),
_last_allocated_bytes(0)
_old_gen_alloc_tracker(old_gen_alloc_tracker)
{
assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
}

@@ -44,11 +45,10 @@ void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
_target_occupancy = new_target_occupancy;
}

void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) {
assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);

_last_allocation_time_s = allocation_time_s;
_last_allocated_bytes = allocated_bytes;
}

void G1IHOPControl::print() {

@@ -60,9 +60,9 @@ void G1IHOPControl::print() {
percent_of(cur_conc_mark_start_threshold, _target_occupancy),
_target_occupancy,
G1CollectedHeap::heap()->used(),
_last_allocated_bytes,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s * 1000.0,
_last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
_last_allocation_time_s > 0.0 ? _old_gen_alloc_tracker->last_period_old_gen_bytes() / _last_allocation_time_s : 0.0,
last_marking_length_s() * 1000.0);
}

@@ -71,21 +71,23 @@ void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
_target_occupancy,
G1CollectedHeap::heap()->used(),
_last_allocated_bytes,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s,
last_marking_length_s());
}

G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent) :
G1IHOPControl(ihop_percent),
G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
_last_marking_length_s(0.0) {
}

G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker,
G1Predictions const* predictor,
size_t heap_reserve_percent,
size_t heap_waste_percent) :
G1IHOPControl(ihop_percent),
G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
_heap_reserve_percent(heap_reserve_percent),
_heap_waste_percent(heap_waste_percent),
_predictor(predictor),

@@ -145,13 +147,16 @@ size_t G1AdaptiveIHOPControl::get_conc_mark_start_threshold() {
}
}

void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s,
size_t allocated_bytes,
size_t additional_buffer_size) {
G1IHOPControl::update_allocation_info(allocation_time_s, allocated_bytes, additional_buffer_size);
double G1AdaptiveIHOPControl::last_mutator_period_old_allocation_rate() const {
assert(_last_allocation_time_s > 0, "This should not be called when the last GC is full");

double allocation_rate = (double) allocated_bytes / allocation_time_s;
_allocation_rate_s.add(allocation_rate);
return _old_gen_alloc_tracker->last_period_old_gen_growth() / _last_allocation_time_s;
}

void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s,
size_t additional_buffer_size) {
G1IHOPControl::update_allocation_info(allocation_time_s, additional_buffer_size);
_allocation_rate_s.add(last_mutator_period_old_allocation_rate());

_last_unrestrained_young_size = additional_buffer_size;
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1IHOPCONTROL_HPP
#define SHARE_GC_G1_G1IHOPCONTROL_HPP

#include "gc/g1/g1OldGenAllocationTracker.hpp"
#include "memory/allocation.hpp"
#include "utilities/numberSeq.hpp"

@@ -44,12 +45,12 @@ class G1IHOPControl : public CHeapObj<mtGC> {

// Most recent complete mutator allocation period in seconds.
double _last_allocation_time_s;
// Amount of bytes allocated during _last_allocation_time_s.
size_t _last_allocated_bytes;

// Initialize an instance with the initial IHOP value in percent. The target
// occupancy will be updated at the first heap expansion.
G1IHOPControl(double initial_ihop_percent);
const G1OldGenAllocationTracker* _old_gen_alloc_tracker;
// Initialize an instance with the old gen allocation tracker and the
// initial IHOP value in percent. The target occupancy will be updated
// at the first heap expansion.
G1IHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);

// Most recent time from the end of the concurrent start to the start of the first
// mixed gc.

@@ -70,7 +71,7 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// Together with the target occupancy, this additional buffer should contain the
// difference between old gen size and total heap size at the start of reclamation,
// and space required for that reclamation.
virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
// Update the time spent in the mutator beginning from the end of concurrent start to
// the first mixed gc.
virtual void update_marking_length(double marking_length_s) = 0;

@@ -88,7 +89,7 @@ class G1StaticIHOPControl : public G1IHOPControl {
protected:
double last_marking_length_s() const { return _last_marking_length_s; }
public:
G1StaticIHOPControl(double ihop_percent);
G1StaticIHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);

size_t get_conc_mark_start_threshold() {
guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");

@@ -132,17 +133,22 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
// end of marking. This is typically lower than the requested threshold, as the
// algorithm needs to consider restrictions by the environment.
size_t actual_target_threshold() const;

// This method calculates the old gen allocation rate based on the net survived
// bytes that are allocated in the old generation in the last mutator period.
double last_mutator_period_old_allocation_rate() const;
protected:
virtual double last_marking_length_s() const { return _marking_times_s.last(); }
public:
G1AdaptiveIHOPControl(double ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker,
G1Predictions const* predictor,
size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into.
size_t heap_waste_percent); // The percentage of the free space in the heap that we think is not usable for allocation.

virtual size_t get_conc_mark_start_threshold();

virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
virtual void update_marking_length(double marking_length_s);

virtual void print();
@@ -24,19 +24,39 @@

#include "precompiled.hpp"
#include "gc/g1/g1OldGenAllocationTracker.hpp"
#include "logging/log.hpp"

G1OldGenAllocationTracker::G1OldGenAllocationTracker() :
_last_cycle_old_bytes(0),
_last_cycle_duration(0.0),
_allocated_bytes_since_last_gc(0) {
_last_period_old_gen_bytes(0),
_last_period_old_gen_growth(0),
_humongous_bytes_after_last_gc(0),
_allocated_bytes_since_last_gc(0),
_allocated_humongous_bytes_since_last_gc(0) {
}

void G1OldGenAllocationTracker::reset_after_full_gc() {
_last_cycle_duration = 0;
reset_cycle_after_gc();
}
void G1OldGenAllocationTracker::reset_after_gc(size_t humongous_bytes_after_gc) {
// Calculate actual increase in old, taking eager reclaim into consideration.
size_t last_period_humongous_increase = 0;
if (humongous_bytes_after_gc > _humongous_bytes_after_last_gc) {
last_period_humongous_increase = humongous_bytes_after_gc - _humongous_bytes_after_last_gc;
assert(last_period_humongous_increase <= _allocated_humongous_bytes_since_last_gc,
"Increase larger than allocated " SIZE_FORMAT " <= " SIZE_FORMAT,
last_period_humongous_increase, _allocated_humongous_bytes_since_last_gc);
}
_last_period_old_gen_growth = _allocated_bytes_since_last_gc + last_period_humongous_increase;

void G1OldGenAllocationTracker::reset_after_young_gc(double allocation_duration_s) {
_last_cycle_duration = allocation_duration_s;
reset_cycle_after_gc();
}
// Calculate and record needed values.
_last_period_old_gen_bytes = _allocated_bytes_since_last_gc + _allocated_humongous_bytes_since_last_gc;
_humongous_bytes_after_last_gc = humongous_bytes_after_gc;

log_debug(gc, alloc, stats)("Old generation allocation in the last mutator period, "
"old gen allocated: " SIZE_FORMAT "B, humongous allocated: " SIZE_FORMAT "B,"
"old gen growth: " SIZE_FORMAT "B.",
_allocated_bytes_since_last_gc,
_allocated_humongous_bytes_since_last_gc,
_last_period_old_gen_growth);

// Reset for next mutator period.
_allocated_bytes_since_last_gc = 0;
_allocated_humongous_bytes_since_last_gc = 0;
}
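A small worked example of the bookkeeping reset_after_gc() performs, using invented numbers (the field names follow the patch, the values are purely illustrative):

  // _humongous_bytes_after_last_gc = 10 MB   (humongous space after the previous GC)
  // humongous_bytes_after_gc       = 14 MB   (humongous space after this GC)
  // _allocated_bytes_since_last_gc =  6 MB   (non-humongous old-gen allocations)
  // humongous increase             = 14 MB - 10 MB = 4 MB  (eagerly reclaimed humongous
  //                                   objects never show up as an increase)
  // _last_period_old_gen_growth    =  6 MB + 4 MB = 10 MB
  // G1AdaptiveIHOPControl::last_mutator_period_old_allocation_rate() then divides this
  // growth by the mutator period length to get the old-gen allocation rate fed into
  // the IHOP prediction.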
@@ -28,34 +28,42 @@
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"

class G1AdaptiveIHOPControl;

// Track allocation details in the old generation.
class G1OldGenAllocationTracker : public CHeapObj<mtGC> {
// New bytes allocated in old gen between the end of the last GC and
// the end of the GC before that.
size_t _last_cycle_old_bytes;
// The number of seconds between the end of the last GC and
// the end of the GC before that.
double _last_cycle_duration;
// Total number of bytes allocated in the old generaton during
// last mutator period.
size_t _last_period_old_gen_bytes;
// Total growth of the old geneneration for last mutator period,
// taking eager reclaim into consideration.
size_t _last_period_old_gen_growth;

// Total size of humongous objects for last gc.
size_t _humongous_bytes_after_last_gc;

// Non-humongous old generation allocations during last mutator period.
size_t _allocated_bytes_since_last_gc;

void reset_cycle_after_gc() {
_last_cycle_old_bytes = _allocated_bytes_since_last_gc;
_allocated_bytes_since_last_gc = 0;
}
// Humongous allocations during last mutator period.
size_t _allocated_humongous_bytes_since_last_gc;

public:
G1OldGenAllocationTracker();
// Add the given number of bytes to the total number of allocated bytes in the old gen.

void add_allocated_bytes_since_last_gc(size_t bytes) { _allocated_bytes_since_last_gc += bytes; }
void add_allocated_humongous_bytes_since_last_gc(size_t bytes) { _allocated_humongous_bytes_since_last_gc += bytes; }

size_t last_cycle_old_bytes() { return _last_cycle_old_bytes; }
// Record a humongous allocation in a collection pause. This allocation
// is accounted to the previous mutator period.
void record_collection_pause_humongous_allocation(size_t bytes) {
_humongous_bytes_after_last_gc += bytes;
}

double last_cycle_duration() { return _last_cycle_duration; }
size_t last_period_old_gen_bytes() const { return _last_period_old_gen_bytes; }
size_t last_period_old_gen_growth() const { return _last_period_old_gen_growth; };

// Reset stats after a collection.
void reset_after_full_gc();
void reset_after_young_gc(double allocation_duration_s);
// Calculates and resets stats after a collection.
void reset_after_gc(size_t humongous_bytes_after_gc);
};

#endif // SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
#endif // SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
@@ -125,14 +125,14 @@ void G1ParScanThreadState::verify_task(narrowOop* task) const {
assert(task != NULL, "invariant");
assert(UseCompressedOops, "sanity");
oop p = RawAccess<>::oop_load(task);
assert(_g1h->is_in_g1_reserved(p),
assert(_g1h->is_in_reserved(p),
"task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
assert(task != NULL, "invariant");
oop p = RawAccess<>::oop_load(task);
assert(_g1h->is_in_g1_reserved(p),
assert(_g1h->is_in_reserved(p),
"task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}
@@ -57,7 +57,8 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_analytics(new G1Analytics(&_predictor)),
_remset_tracker(),
_mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
_ihop_control(create_ihop_control(&_predictor)),
_old_gen_alloc_tracker(),
_ihop_control(create_ihop_control(&_old_gen_alloc_tracker, &_predictor)),
_policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
_full_collection_start_sec(0.0),
_young_list_target_length(0),

@@ -72,7 +73,6 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_rs_length(0),
_rs_length_prediction(0),
_pending_cards_at_gc_start(0),
_old_gen_alloc_tracker(),
_concurrent_start_to_mixed(),
_collection_set(NULL),
_g1h(NULL),

@@ -469,7 +469,7 @@ void G1Policy::record_full_collection_end() {
update_young_list_max_and_target_length();
update_rs_length_prediction();

_old_gen_alloc_tracker.reset_after_full_gc();
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);

record_pause(FullGC, _full_collection_start_sec, end_sec);
}

@@ -804,9 +804,8 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
// predicted target occupancy.
size_t last_unrestrained_young_length = update_young_list_max_and_target_length();

_old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
_old_gen_alloc_tracker.last_cycle_old_bytes(),
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
update_ihop_prediction(app_time_ms / 1000.0,
last_unrestrained_young_length * HeapRegion::GrainBytes,
is_young_only_pause(this_pause));

@@ -844,19 +843,20 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
scan_logged_cards_time_goal_ms);
}

G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
const G1Predictions* predictor) {
if (G1UseAdaptiveIHOP) {
return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
old_gen_alloc_tracker,
predictor,
G1ReservePercent,
G1HeapWastePercent);
} else {
return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, old_gen_alloc_tracker);
}
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
size_t mutator_alloc_bytes,
size_t young_gen_size,
bool this_gc_was_young_only) {
// Always try to update IHOP prediction. Even evacuation failures give information

@@ -885,7 +885,7 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
// marking, which makes any prediction useless. This increases the accuracy of the
// prediction.
if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
_ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
_ihop_control->update_allocation_info(mutator_time_s, young_gen_size);
report = true;
}

@@ -56,10 +56,10 @@ class STWGCTimer;
class G1Policy: public CHeapObj<mtGC> {
private:

static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
static G1IHOPControl* create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
const G1Predictions* predictor);
// Update the IHOP control with necessary statistics.
void update_ihop_prediction(double mutator_time_s,
size_t mutator_alloc_bytes,
size_t young_gen_size,
bool this_gc_was_young_only);
void report_ihop_statistics();

@@ -68,6 +68,10 @@ class G1Policy: public CHeapObj<mtGC> {
G1Analytics* _analytics;
G1RemSetTrackingPolicy _remset_tracker;
G1MMUTracker* _mmu_tracker;

// Tracking the allocation in the old generation between
// two GCs.
G1OldGenAllocationTracker _old_gen_alloc_tracker;
G1IHOPControl* _ihop_control;

GCPolicyCounters* _policy_counters;

@@ -101,10 +105,6 @@ class G1Policy: public CHeapObj<mtGC> {

size_t _pending_cards_at_gc_start;

// Tracking the allocation in the old generation between
// two GCs.
G1OldGenAllocationTracker _old_gen_alloc_tracker;

G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed;

bool should_update_surv_rate_group_predictors() {
@ -27,9 +27,8 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries) :
|
||||
G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint num_cache_entries) :
|
||||
_target(target),
|
||||
_num_stats(max_regions),
|
||||
_cache(NULL),
|
||||
_num_cache_entries(num_cache_entries),
|
||||
_cache_hits(0),
|
||||
|
||||
@ -63,8 +63,6 @@ class G1RegionMarkStatsCache {
|
||||
private:
|
||||
// The array of statistics entries to evict to; the global array.
|
||||
G1RegionMarkStats* _target;
|
||||
// Number of entries in the eviction target.
|
||||
uint _num_stats;
|
||||
|
||||
// An entry of the statistics cache.
|
||||
struct G1RegionMarkStatsCacheEntry {
|
||||
@ -100,7 +98,7 @@ private:
|
||||
|
||||
G1RegionMarkStatsCacheEntry* find_for_add(uint region_idx);
|
||||
public:
|
||||
G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries);
|
||||
G1RegionMarkStatsCache(G1RegionMarkStats* target, uint num_cache_entries);
|
||||
|
||||
~G1RegionMarkStatsCache();
|
||||
|
||||
|
||||
@ -1056,7 +1056,7 @@ void PSParallelCompact::post_compact()
|
||||
}
|
||||
|
||||
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
|
||||
ClassLoaderDataGraph::purge();
|
||||
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
|
||||
MetaspaceUtils::verify_metrics();
|
||||
|
||||
heap->prune_scavengable_nmethods();
|
||||
|
||||
@ -661,7 +661,7 @@ void GenCollectedHeap::do_collection(bool full,
|
||||
_young_gen->compute_new_size();
|
||||
|
||||
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
|
||||
ClassLoaderDataGraph::purge();
|
||||
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
|
||||
MetaspaceUtils::verify_metrics();
|
||||
// Resize the metaspace capacity after full collections
|
||||
MetaspaceGC::compute_new_size();
|
||||
|
||||
@ -2206,7 +2206,7 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {
|
||||
ShenandoahGCPhase phase(full_gc ?
|
||||
ShenandoahPhaseTimings::full_gc_purge_cldg :
|
||||
ShenandoahPhaseTimings::purge_cldg);
|
||||
ClassLoaderDataGraph::purge();
|
||||
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
|
||||
}
|
||||
// Resize and verify metaspace
|
||||
MetaspaceGC::compute_new_size();
|
||||
|
||||
@ -185,7 +185,7 @@ void ShenandoahUnload::unload() {
|
||||
|
||||
{
|
||||
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_purge_cldg);
|
||||
ClassLoaderDataGraph::purge();
|
||||
ClassLoaderDataGraph::purge(/*at_safepoint*/false);
|
||||
}
|
||||
|
||||
{
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -157,7 +157,7 @@ void ZUnload::purge() {
|
||||
ZNMethod::purge(_workers);
|
||||
}
|
||||
|
||||
ClassLoaderDataGraph::purge();
|
||||
ClassLoaderDataGraph::purge(/*at_safepoint*/false);
|
||||
CodeCache::purge_exception_caches();
|
||||
}
|
||||
|
||||
|
||||
@ -66,6 +66,10 @@
|
||||
<Field type="InflateCause" name="cause" label="Monitor Inflation Cause" description="Cause of inflation" />
|
||||
</Event>
|
||||
|
||||
<Event name="SyncOnPrimitiveWrapper" category="Java Virtual Machine, Diagnostics" label="Primitive Wrapper Synchronization" thread="true" stackTrace="true" startTime="false" experimental="true">
|
||||
<Field type="Class" name="boxClass" label="Boxing Class" />
|
||||
</Event>
|
||||
|
||||
<Event name="BiasedLockRevocation" category="Java Virtual Machine, Runtime" label="Biased Lock Revocation" description="Revoked bias of object" thread="true"
|
||||
stackTrace="true">
|
||||
<Field type="Class" name="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked" />
|
||||
|
||||
@ -130,6 +130,7 @@
|
||||
LOG_TAG(phases) \
|
||||
LOG_TAG(plab) \
|
||||
LOG_TAG(preview) /* Trace loading of preview feature types */ \
|
||||
LOG_TAG(primitivewrappers) \
|
||||
LOG_TAG(promotion) \
|
||||
LOG_TAG(preorder) /* Trace all classes loaded in order referenced (not loaded) */ \
|
||||
LOG_TAG(protectiondomain) /* "Trace protection domain verification" */ \
|
||||
|
||||
@ -3956,8 +3956,9 @@ void InstanceKlass::purge_previous_version_list() {
|
||||
InstanceKlass* next = pv_node->previous_versions();
|
||||
pv_node->link_previous_versions(NULL); // point next to NULL
|
||||
last->link_previous_versions(next);
|
||||
// Add to the deallocate list after unlinking
|
||||
loader_data->add_to_deallocate_list(pv_node);
|
||||
// Delete this node directly. Nothing is referring to it and we don't
|
||||
// want it to increase the counter for metadata to delete in CLDG.
|
||||
MetadataFactory::free_metadata(loader_data, pv_node);
|
||||
pv_node = next;
|
||||
deleted_count++;
|
||||
version++;
|
||||
|
||||
@ -632,6 +632,8 @@ protected:
|
||||
void set_is_hidden() { _access_flags.set_is_hidden_class(); }
|
||||
bool is_non_strong_hidden() const { return access_flags().is_hidden_class() &&
|
||||
class_loader_data()->has_class_mirror_holder(); }
|
||||
bool is_box() const { return access_flags().is_box_class(); }
|
||||
void set_is_box() { _access_flags.set_is_box_class(); }
|
||||
|
||||
bool is_cloneable() const;
|
||||
void set_is_cloneable();
|
||||
@ -641,6 +643,7 @@ protected:
|
||||
// prototype markWord. If biased locking is enabled it may further be
|
||||
// biasable and have an epoch.
|
||||
markWord prototype_header() const { return _prototype_header; }
|
||||
|
||||
// NOTE: once instances of this klass are floating around in the
|
||||
// system, this header must only be updated at a safepoint.
|
||||
// NOTE 2: currently we only ever set the prototype header to the
|
||||
|
||||
@ -84,12 +84,13 @@
|
||||
// [header | 0 | 01] unlocked regular object header
|
||||
// [ptr | 10] monitor inflated lock (header is wapped out)
|
||||
// [ptr | 11] marked used to mark an object
|
||||
// [0 ............ 0| 00] inflating inflation in progress
|
||||
//
|
||||
// We assume that stack/thread pointers have the lowest two bits cleared.
|
||||
//
|
||||
// - INFLATING() is a distinguished markword value that is used when
|
||||
// inflating an existing stack-lock into an ObjectMonitor. See below
|
||||
// for is_being_inflated() and INFLATING().
|
||||
// - INFLATING() is a distinguished markword value of all zeros that is
|
||||
// used when inflating an existing stack-lock into an ObjectMonitor.
|
||||
// See below for is_being_inflated() and INFLATING().
|
||||
|
||||
class BasicLock;
|
||||
class ObjectMonitor;
|
||||
|
||||
@ -873,8 +873,7 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
|
||||
{
|
||||
// For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
|
||||
projs->fallthrough_proj = pn;
|
||||
DUIterator_Fast jmax, j = pn->fast_outs(jmax);
|
||||
const Node *cn = pn->fast_out(j);
|
||||
const Node *cn = pn->unique_ctrl_out();
|
||||
if (cn->is_Catch()) {
|
||||
ProjNode *cpn = NULL;
|
||||
for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
|
||||
|
||||
@ -2282,6 +2282,10 @@ WB_ENTRY(void, WB_CheckThreadObjOfTerminatingThread(JNIEnv* env, jobject wb, job
|
||||
}
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jboolean, WB_IsJVMTIIncluded(JNIEnv* env, jobject wb))
|
||||
return INCLUDE_JVMTI ? JNI_TRUE : JNI_FALSE;
|
||||
WB_END
|
||||
|
||||
#define CC (char*)
|
||||
|
||||
static JNINativeMethod methods[] = {
|
||||
@ -2533,6 +2537,7 @@ static JNINativeMethod methods[] = {
|
||||
{CC"protectionDomainRemovedCount", CC"()I", (void*)&WB_ProtectionDomainRemovedCount },
|
||||
{CC"aotLibrariesCount", CC"()I", (void*)&WB_AotLibrariesCount },
|
||||
{CC"getKlassMetadataSize", CC"(Ljava/lang/Class;)I",(void*)&WB_GetKlassMetadataSize},
|
||||
{CC"isJVMTIIncluded", CC"()Z", (void*)&WB_IsJVMTIIncluded},
|
||||
};
|
||||
|
||||
|
||||
|
||||
@ -4193,6 +4193,11 @@ jint Arguments::apply_ergo() {
|
||||
}
|
||||
#endif
|
||||
|
||||
if (FLAG_IS_CMDLINE(DiagnoseSyncOnPrimitiveWrappers)) {
|
||||
if (DiagnoseSyncOnPrimitiveWrappers == ObjectSynchronizer::LOG_WARNING && !log_is_enabled(Info, primitivewrappers)) {
|
||||
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(primitivewrappers));
|
||||
}
|
||||
}
|
||||
return JNI_OK;
|
||||
}
|
||||
|
||||
|
||||
@ -808,6 +808,16 @@ const size_t minimumSymbolTableSize = 1024;
|
||||
range(500, max_intx) \
|
||||
constraint(BiasedLockingDecayTimeFunc,AfterErgo) \
|
||||
\
|
||||
diagnostic(intx, DiagnoseSyncOnPrimitiveWrappers, 0, \
|
||||
"Detect and take action upon identifying synchronization on " \
|
||||
"primitive wrappers. Modes: " \
|
||||
"0: off; " \
|
||||
"1: exit with fatal error; " \
|
||||
"2: log message to stdout. Output file can be specified with " \
|
||||
" -Xlog:primitivewrappers. If JFR is running it will " \
|
||||
" also generate JFR events.") \
|
||||
range(0, 2) \
|
||||
\
|
||||
product(bool, ExitOnOutOfMemoryError, false, \
|
||||
"JVM exits on the first occurrence of an out-of-memory error") \
|
||||
\
|
||||
|
||||
@ -23,7 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/classLoaderDataGraph.inline.hpp"
|
||||
#include "classfile/classLoaderDataGraph.hpp"
|
||||
#include "classfile/dictionary.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
@ -599,15 +599,6 @@ void SafepointSynchronize::do_cleanup_tasks() {
|
||||
cleanup.work(0);
|
||||
}
|
||||
|
||||
// Needs to be done single threaded by the VMThread. This walks
|
||||
// the thread stacks looking for references to metadata before
|
||||
// deciding to remove it from the metaspaces.
|
||||
if (ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) {
|
||||
const char* name = "cleanup live ClassLoaderData metaspaces";
|
||||
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
|
||||
ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
|
||||
}
|
||||
|
||||
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
|
||||
}
|
||||
|
||||
|
||||
@ -23,6 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/classLoaderDataGraph.inline.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/protectionDomainCache.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
@ -145,6 +146,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
|
||||
bool deflate_idle_monitors = false;
|
||||
JvmtiDeferredEvent jvmti_event;
|
||||
bool oop_handles_to_release = false;
|
||||
bool cldg_cleanup_work = false;
|
||||
{
|
||||
// Need state transition ThreadBlockInVM so that this thread
|
||||
// will be handled by safepoint correctly when this thread is
|
||||
@ -172,6 +174,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
|
||||
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
|
||||
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
|
||||
(oop_handles_to_release = (_oop_handle_list != NULL)) |
|
||||
(cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
|
||||
(deflate_idle_monitors = ObjectSynchronizer::is_async_deflation_needed())
|
||||
) == 0) {
|
||||
// Wait until notified that there is some work to do.
|
||||
@ -237,6 +240,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
|
||||
if (oop_handles_to_release) {
|
||||
release_oop_handles();
|
||||
}
|
||||
|
||||
if (cldg_cleanup_work) {
|
||||
ClassLoaderDataGraph::safepoint_and_clean_metaspaces();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -504,6 +504,10 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
|
||||
NoSafepointVerifier nsv;
|
||||
if (obj == NULL) return false; // Need to throw NPE
|
||||
|
||||
if (DiagnoseSyncOnPrimitiveWrappers != 0 && obj->klass()->is_box()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const markWord mark = obj->mark();
|
||||
|
||||
if (mark.has_monitor()) {
|
||||
@ -554,6 +558,52 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
|
||||
return false; // revert to slow-path
|
||||
}
|
||||
|
||||
// Handle notifications when synchronizing on primitive wrappers
|
||||
void ObjectSynchronizer::handle_sync_on_primitive_wrapper(Handle obj, Thread* current) {
|
||||
assert(current->is_Java_thread(), "must be for java object synchronization");
|
||||
JavaThread* self = (JavaThread*) current;
|
||||
|
||||
frame last_frame = self->last_frame();
|
||||
if (last_frame.is_interpreted_frame()) {
|
||||
// adjust bcp to point back to monitorenter so that we print the correct line numbers
|
||||
last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
|
||||
}
|
||||
|
||||
if (DiagnoseSyncOnPrimitiveWrappers == FATAL_EXIT) {
|
||||
ResourceMark rm(self);
|
||||
stringStream ss;
|
||||
self->print_stack_on(&ss);
|
||||
char* base = (char*)strstr(ss.base(), "at");
|
||||
char* newline = (char*)strchr(ss.base(), '\n');
|
||||
if (newline != NULL) {
|
||||
*newline = '\0';
|
||||
}
|
||||
fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
|
||||
} else {
|
||||
assert(DiagnoseSyncOnPrimitiveWrappers == LOG_WARNING, "invalid value for DiagnoseSyncOnPrimitiveWrappers");
|
||||
ResourceMark rm(self);
|
||||
Log(primitivewrappers) pwlog;
|
||||
|
||||
pwlog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
|
||||
if (self->has_last_Java_frame()) {
|
||||
LogStream info_stream(pwlog.info());
|
||||
self->print_stack_on(&info_stream);
|
||||
} else {
|
||||
pwlog.info("Cannot find the last Java frame");
|
||||
}
|
||||
|
||||
EventSyncOnPrimitiveWrapper event;
|
||||
if (event.should_commit()) {
|
||||
event.set_boxClass(obj->klass());
|
||||
event.commit();
|
||||
}
|
||||
}
|
||||
|
||||
if (last_frame.is_interpreted_frame()) {
|
||||
last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Monitor Enter/Exit
|
||||
// The interpreter and compiler assembly code tries to lock using the fast path
|
||||
@ -561,6 +611,10 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
|
||||
// changed. The implementation is extremely sensitive to race condition. Be careful.
|
||||
|
||||
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
|
||||
if (DiagnoseSyncOnPrimitiveWrappers != 0 && obj->klass()->is_box()) {
|
||||
handle_sync_on_primitive_wrapper(obj, THREAD);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
if (!SafepointSynchronize::is_at_safepoint()) {
|
||||
BiasedLocking::revoke(obj, THREAD);
|
||||
@ -704,6 +758,10 @@ void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
|
||||
// JNI locks on java objects
|
||||
// NOTE: must use heavy weight monitor to handle jni monitor enter
|
||||
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
|
||||
if (DiagnoseSyncOnPrimitiveWrappers != 0 && obj->klass()->is_box()) {
|
||||
handle_sync_on_primitive_wrapper(obj, THREAD);
|
||||
}
|
||||
|
||||
// the current locking is from JNI instead of Java code
|
||||
if (UseBiasedLocking) {
|
||||
BiasedLocking::revoke(obj, THREAD);
|
||||
@ -2322,6 +2380,8 @@ void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
|
||||
Atomic::load(&om_list_globals._free_count),
|
||||
Atomic::load(&om_list_globals._wait_count));
|
||||
|
||||
GVars.stw_random = os::random();
|
||||
|
||||
// The ServiceThread's async deflation request has been processed.
|
||||
_last_async_deflation_time_ns = os::javaTimeNanos();
|
||||
set_is_async_deflation_requested(false);
|
||||
|
||||
@ -62,6 +62,12 @@ class ObjectSynchronizer : AllStatic {
|
||||
inflate_cause_nof = 7 // Number of causes
|
||||
} InflateCause;
|
||||
|
||||
typedef enum {
|
||||
NOT_ENABLED = 0,
|
||||
FATAL_EXIT = 1,
|
||||
LOG_WARNING = 2
|
||||
} SyncDiagnosticOption;
|
||||
|
||||
// exit must be implemented non-blocking, since the compiler cannot easily handle
|
||||
// deoptimization at monitor exit. Hence, it does not take a Handle argument.
|
||||
|
||||
@ -194,6 +200,8 @@ class ObjectSynchronizer : AllStatic {
|
||||
static u_char* get_gvars_hc_sequence_addr();
|
||||
static size_t get_gvars_size();
|
||||
static u_char* get_gvars_stw_random_addr();
|
||||
|
||||
static void handle_sync_on_primitive_wrapper(Handle obj, Thread* current);
|
||||
};
|
||||
|
||||
// ObjectLocker enforces balanced locking and can never throw an
|
||||
|
||||
@ -23,7 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/classLoaderDataGraph.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
@ -40,7 +40,6 @@
|
||||
#include "runtime/deoptimization.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/sweeper.hpp"
|
||||
#include "runtime/synchronizer.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/threadSMR.inline.hpp"
|
||||
@ -94,6 +93,10 @@ void VM_ClearICs::doit() {
|
||||
}
|
||||
}
|
||||
|
||||
void VM_CleanClassLoaderDataMetaspaces::doit() {
|
||||
ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
|
||||
}
|
||||
|
||||
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
|
||||
_thread = thread;
|
||||
_id = id;
|
||||
|
||||
@ -104,6 +104,7 @@
|
||||
template(ClassLoaderHierarchyOperation) \
|
||||
template(DumpHashtable) \
|
||||
template(DumpTouchedMethods) \
|
||||
template(CleanClassLoaderDataMetaspaces) \
|
||||
template(PrintCompileQueue) \
|
||||
template(PrintClassHierarchy) \
|
||||
template(ThreadSuspend) \
|
||||
@ -236,6 +237,13 @@ class VM_GTestExecuteAtSafepoint: public VM_Operation {
|
||||
VM_GTestExecuteAtSafepoint() {}
|
||||
};
|
||||
|
||||
class VM_CleanClassLoaderDataMetaspaces : public VM_Operation {
|
||||
public:
|
||||
VM_CleanClassLoaderDataMetaspaces() {}
|
||||
VMOp_Type type() const { return VMOp_CleanClassLoaderDataMetaspaces; }
|
||||
void doit();
|
||||
};
|
||||
|
||||
// Deopt helper that can deoptimize frames in threads other than the
|
||||
// current thread. Only used through Deoptimization::deoptimize_frame.
|
||||
class VM_DeoptimizeFrame: public VM_Operation {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -67,6 +67,7 @@ enum {
|
||||
JVM_ACC_HAS_FINAL_METHOD = 0x01000000, // True if klass has final method
|
||||
JVM_ACC_IS_SHARED_CLASS = 0x02000000, // True if klass is shared
|
||||
JVM_ACC_IS_HIDDEN_CLASS = 0x04000000, // True if klass is hidden
|
||||
JVM_ACC_IS_BOX_CLASS = 0x08000000, // True if klass is primitive wrapper
|
||||
|
||||
// Klass* and Method* flags
|
||||
JVM_ACC_HAS_LOCAL_VARIABLE_TABLE= 0x00200000,
|
||||
@ -151,6 +152,7 @@ class AccessFlags {
|
||||
bool is_cloneable_fast () const { return (_flags & JVM_ACC_IS_CLONEABLE_FAST ) != 0; }
|
||||
bool is_shared_class () const { return (_flags & JVM_ACC_IS_SHARED_CLASS ) != 0; }
|
||||
bool is_hidden_class () const { return (_flags & JVM_ACC_IS_HIDDEN_CLASS ) != 0; }
|
||||
bool is_box_class () const { return (_flags & JVM_ACC_IS_BOX_CLASS ) != 0; }
|
||||
|
||||
// Klass* and Method* flags
|
||||
bool has_localvariable_table () const { return (_flags & JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) != 0; }
|
||||
@ -224,6 +226,7 @@ class AccessFlags {
|
||||
void set_has_miranda_methods() { atomic_set_bits(JVM_ACC_HAS_MIRANDA_METHODS); }
|
||||
void set_is_shared_class() { atomic_set_bits(JVM_ACC_IS_SHARED_CLASS); }
|
||||
void set_is_hidden_class() { atomic_set_bits(JVM_ACC_IS_HIDDEN_CLASS); }
|
||||
void set_is_box_class() { atomic_set_bits(JVM_ACC_IS_BOX_CLASS); }
|
||||
|
||||
public:
|
||||
// field flags
|
||||
|
||||
@ -110,9 +110,7 @@ public class BytecodeDescriptor {
|
||||
} else if (type == int.class) {
|
||||
return "I";
|
||||
}
|
||||
StringBuilder sb = new StringBuilder();
|
||||
unparseSig(type, sb);
|
||||
return sb.toString();
|
||||
return type.descriptorString();
|
||||
}
|
||||
|
||||
public static String unparse(MethodType type) {
|
||||
|
||||
@ -91,6 +91,11 @@
|
||||
<setting name="threshold" control="synchronization-threshold">20 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.SyncOnPrimitiveWrapper">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="stackTrace">true</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.BiasedLockRevocation">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="stackTrace">true</setting>
|
||||
|
||||
@ -91,6 +91,11 @@
|
||||
<setting name="threshold" control="synchronization-threshold">10 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.SyncOnPrimitiveWrapper">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="stackTrace">true</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.BiasedLockRevocation">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="stackTrace">true</setting>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,14 +24,40 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1IHOPControl.hpp"
|
||||
#include "gc/g1/g1OldGenAllocationTracker.hpp"
|
||||
#include "gc/g1/g1Predictions.hpp"
|
||||
#include "unittest.hpp"
|
||||
|
||||
static void test_update(G1IHOPControl* ctrl, double alloc_time,
|
||||
size_t alloc_amount, size_t young_size,
|
||||
double mark_time) {
|
||||
static void test_update_allocation_tracker(G1OldGenAllocationTracker* alloc_tracker,
|
||||
size_t alloc_amount) {
|
||||
alloc_tracker->add_allocated_bytes_since_last_gc(alloc_amount);
|
||||
alloc_tracker->reset_after_gc((size_t)0);
|
||||
}
|
||||
|
||||
static void test_update(G1IHOPControl* ctrl,
|
||||
G1OldGenAllocationTracker* alloc_tracker,
|
||||
double alloc_time, size_t alloc_amount,
|
||||
size_t young_size, double mark_time) {
|
||||
test_update_allocation_tracker(alloc_tracker, alloc_amount);
|
||||
for (int i = 0; i < 100; i++) {
|
||||
ctrl->update_allocation_info(alloc_time, alloc_amount, young_size);
|
||||
ctrl->update_allocation_info(alloc_time, young_size);
|
||||
ctrl->update_marking_length(mark_time);
|
||||
}
|
||||
}
|
||||
|
||||
static void test_update_humongous(G1IHOPControl* ctrl,
|
||||
G1OldGenAllocationTracker* alloc_tracker,
|
||||
double alloc_time,
|
||||
size_t alloc_amount_non_hum,
|
||||
size_t alloc_amount_hum,
|
||||
size_t humongous_bytes_after_last_gc,
|
||||
size_t young_size,
|
||||
double mark_time) {
|
||||
alloc_tracker->add_allocated_bytes_since_last_gc(alloc_amount_non_hum);
|
||||
alloc_tracker->add_allocated_humongous_bytes_since_last_gc(alloc_amount_hum);
|
||||
alloc_tracker->reset_after_gc(humongous_bytes_after_last_gc);
|
||||
for (int i = 0; i < 100; i++) {
|
||||
ctrl->update_allocation_info(alloc_time, young_size);
|
||||
ctrl->update_marking_length(mark_time);
|
||||
}
|
||||
}
|
||||
@ -45,13 +71,15 @@ TEST_VM(G1StaticIHOPControl, simple) {
|
||||
|
||||
const size_t initial_ihop = 45;
|
||||
|
||||
G1StaticIHOPControl ctrl(initial_ihop);
|
||||
G1OldGenAllocationTracker alloc_tracker;
|
||||
G1StaticIHOPControl ctrl(initial_ihop, &alloc_tracker);
|
||||
ctrl.update_target_occupancy(100);
|
||||
|
||||
size_t threshold = ctrl.get_conc_mark_start_threshold();
|
||||
EXPECT_EQ(initial_ihop, threshold);
|
||||
|
||||
ctrl.update_allocation_info(100.0, 100, 100);
|
||||
test_update_allocation_tracker(&alloc_tracker, 100);
|
||||
ctrl.update_allocation_info(100.0, 100);
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
EXPECT_EQ(initial_ihop, threshold);
|
||||
|
||||
@ -60,12 +88,12 @@ TEST_VM(G1StaticIHOPControl, simple) {
|
||||
EXPECT_EQ(initial_ihop, threshold);
|
||||
|
||||
// Whatever we pass, the IHOP value must stay the same.
|
||||
test_update(&ctrl, 2, 10, 10, 3);
|
||||
test_update(&ctrl, &alloc_tracker, 2, 10, 10, 3);
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
EXPECT_EQ(initial_ihop, threshold);
|
||||
|
||||
test_update(&ctrl, 12, 10, 10, 3);
|
||||
test_update(&ctrl, &alloc_tracker, 12, 10, 10, 3);
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
EXPECT_EQ(initial_ihop, threshold);
|
||||
@ -85,8 +113,9 @@ TEST_VM(G1AdaptiveIHOPControl, simple) {
|
||||
// The final IHOP value is always
|
||||
// target_size - (young_size + alloc_amount/alloc_time * marking_time)
|
||||
|
||||
G1OldGenAllocationTracker alloc_tracker;
|
||||
G1Predictions pred(0.95);
|
||||
G1AdaptiveIHOPControl ctrl(initial_threshold, &pred, 0, 0);
|
||||
G1AdaptiveIHOPControl ctrl(initial_threshold, &alloc_tracker, &pred, 0, 0);
|
||||
ctrl.update_target_occupancy(target_size);
|
||||
|
||||
// First "load".
|
||||
@ -102,7 +131,8 @@ TEST_VM(G1AdaptiveIHOPControl, simple) {
|
||||
EXPECT_EQ(initial_threshold, threshold);
|
||||
|
||||
for (size_t i = 0; i < G1AdaptiveIHOPNumInitialSamples - 1; i++) {
|
||||
ctrl.update_allocation_info(alloc_time1, alloc_amount1, young_size);
|
||||
test_update_allocation_tracker(&alloc_tracker, alloc_amount1);
|
||||
ctrl.update_allocation_info(alloc_time1, young_size);
|
||||
ctrl.update_marking_length(marking_time1);
|
||||
// Not enough data yet.
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
@ -110,7 +140,7 @@ TEST_VM(G1AdaptiveIHOPControl, simple) {
|
||||
ASSERT_EQ(initial_threshold, threshold) << "on step " << i;
|
||||
}
|
||||
|
||||
test_update(&ctrl, alloc_time1, alloc_amount1, young_size, marking_time1);
|
||||
test_update(&ctrl, &alloc_tracker, alloc_time1, alloc_amount1, young_size, marking_time1);
|
||||
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
@ -123,7 +153,7 @@ TEST_VM(G1AdaptiveIHOPControl, simple) {
|
||||
const size_t settled_ihop2 = target_size
|
||||
- (young_size + alloc_amount2 / alloc_time2 * marking_time2);
|
||||
|
||||
test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
|
||||
test_update(&ctrl, &alloc_tracker, alloc_time2, alloc_amount2, young_size, marking_time2);
|
||||
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
@ -135,15 +165,82 @@ TEST_VM(G1AdaptiveIHOPControl, simple) {
|
||||
const size_t marking_time3 = 2;
|
||||
const size_t settled_ihop3 = 0;
|
||||
|
||||
test_update(&ctrl, alloc_time3, alloc_amount3, young_size, marking_time3);
|
||||
test_update(&ctrl, &alloc_tracker, alloc_time3, alloc_amount3, young_size, marking_time3);
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
EXPECT_EQ(settled_ihop3, threshold);
|
||||
|
||||
// And back to some arbitrary value.
|
||||
test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
|
||||
test_update(&ctrl, &alloc_tracker, alloc_time2, alloc_amount2, young_size, marking_time2);
|
||||
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
|
||||
EXPECT_GT(threshold, settled_ihop3);
|
||||
}
|
||||
|
||||
TEST_VM(G1AdaptiveIHOPControl, humongous) {
|
||||
// Test requires G1
|
||||
if (!UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t initial_threshold = 45;
|
||||
const size_t young_size = 10;
|
||||
const size_t target_size = 100;
|
||||
const double duration = 10.0;
|
||||
const size_t marking_time = 2;
|
||||
|
||||
G1OldGenAllocationTracker alloc_tracker;
|
||||
G1Predictions pred(0.95);
|
||||
G1AdaptiveIHOPControl ctrl(initial_threshold, &alloc_tracker, &pred, 0, 0);
|
||||
ctrl.update_target_occupancy(target_size);
|
||||
|
||||
size_t old_bytes = 100;
|
||||
size_t humongous_bytes = 200;
|
||||
size_t humongous_bytes_after_gc = 150;
|
||||
size_t humongous_bytes_after_last_gc = 50;
|
||||
// Load 1
|
||||
test_update_humongous(&ctrl, &alloc_tracker, duration, 0, humongous_bytes,
|
||||
humongous_bytes_after_last_gc, young_size, marking_time);
|
||||
// Test threshold
|
||||
size_t threshold;
|
||||
threshold = ctrl.get_conc_mark_start_threshold();
|
||||
// Adjusted allocated bytes:
|
||||
// Total bytes: humongous_bytes
|
||||
// Freed hum bytes: humongous_bytes - humongous_bytes_after_last_gc
|
||||
double alloc_rate = humongous_bytes_after_last_gc / duration;
|
||||
size_t target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
|
||||
|
||||
EXPECT_EQ(threshold, target_threshold);
|
||||
|
||||
// Load 2
|
||||
G1AdaptiveIHOPControl ctrl2(initial_threshold, &alloc_tracker, &pred, 0, 0);
|
||||
ctrl2.update_target_occupancy(target_size);
|
||||
test_update_humongous(&ctrl2, &alloc_tracker, duration, old_bytes, humongous_bytes,
|
||||
humongous_bytes_after_gc, young_size, marking_time);
|
||||
threshold = ctrl2.get_conc_mark_start_threshold();
|
||||
// Adjusted allocated bytes:
|
||||
// Total bytes: old_bytes + humongous_bytes
|
||||
// Freed hum bytes: humongous_bytes - (humongous_bytes_after_gc - humongous_bytes_after_last_gc)
|
||||
alloc_rate = (old_bytes + (humongous_bytes_after_gc - humongous_bytes_after_last_gc)) / duration;
|
||||
target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
|
||||
|
||||
EXPECT_EQ(threshold, target_threshold);
|
||||
|
||||
// Load 3
|
||||
humongous_bytes_after_last_gc = humongous_bytes_after_gc;
|
||||
humongous_bytes_after_gc = 50;
|
||||
G1AdaptiveIHOPControl ctrl3(initial_threshold, &alloc_tracker, &pred, 0, 0);
|
||||
ctrl3.update_target_occupancy(target_size);
|
||||
test_update_humongous(&ctrl3, &alloc_tracker, duration, old_bytes, humongous_bytes,
|
||||
humongous_bytes_after_gc, young_size, marking_time);
|
||||
threshold = ctrl3.get_conc_mark_start_threshold();
|
||||
// Adjusted allocated bytes:
|
||||
// All humongous are cleaned up since humongous_bytes_after_gc < humongous_bytes_after_last_gc
|
||||
// Total bytes: old_bytes + humongous_bytes
|
||||
// Freed hum bytes: humongous_bytes
|
||||
alloc_rate = old_bytes / duration;
|
||||
target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
|
||||
|
||||
EXPECT_EQ(threshold, target_threshold);
|
||||
}
|
||||
|
||||
@ -68,6 +68,7 @@ requires.properties= \
|
||||
vm.cds \
|
||||
vm.cds.custom.loaders \
|
||||
vm.cds.archived.java.heap \
|
||||
vm.jvmti \
|
||||
vm.graal.enabled \
|
||||
vm.compiler1.enabled \
|
||||
vm.compiler2.enabled \
|
||||
|
||||
@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @test
|
||||
* @bug 8251527
|
||||
* @summary CTW: C2 (Shenandoah) compilation fails with SEGV due to unhandled catchproj == NUL
|
||||
* @requires vm.flavor == "server"
|
||||
* @requires vm.gc.Shenandoah & !vm.graal.enabled
|
||||
*
|
||||
* @run main/othervm -XX:+UseShenandoahGC -XX:CompileOnly=TestLoadPinnedAfterCall.test -XX:CompileCommand=dontinline,TestLoadPinnedAfterCall.not_inlined -XX:-TieredCompilation -XX:-BackgroundCompilation TestLoadPinnedAfterCall
|
||||
*
|
||||
*/
|
||||
|
||||
public class TestLoadPinnedAfterCall {
|
||||
private A staticField1;
|
||||
private static Object staticField2 = new Object();
|
||||
private static volatile int staticField3;
|
||||
private static int staticField4;
|
||||
static TestLoadPinnedAfterCall object = new TestLoadPinnedAfterCall();
|
||||
|
||||
public static void main(String[] args) {
|
||||
final A a = new A();
|
||||
try {
|
||||
throw new Exception();
|
||||
} catch (Exception ex) {
|
||||
}
|
||||
for (int i = 0; i < 20_000; i++) {
|
||||
inlined(0, 0, 0);
|
||||
inlined(2, 0, 0);
|
||||
inlined(2, 2, 2);
|
||||
|
||||
object.staticField1 = new A();
|
||||
test(true, a, a, false, 2, 2);
|
||||
test(false, a, a, true, 2, 2);
|
||||
test(false, a, a, false, 2, 2);
|
||||
object.staticField1 = a;
|
||||
test(true, a, a, false, 2, 2);
|
||||
test(false, a, a, true, 2, 2);
|
||||
test(false, a, a, false, 2, 2);
|
||||
}
|
||||
}
|
||||
|
||||
private static void test(boolean flag, A a, A a2, boolean flag2, int i1, int i2) {
|
||||
|
||||
int ii = 1;
|
||||
for (; ii < 2; ii *= 2) {
|
||||
|
||||
}
|
||||
ii = ii / 2;
|
||||
|
||||
i1 = 0;
|
||||
for (; i1 < 2; i1 += ii) {
|
||||
for (int i = 0; i < 2; i += ii) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
i2 = 0;
|
||||
for (; i2 < 2; i2 += ii) {
|
||||
for (int i = 0; i < 2; i += ii) {
|
||||
for (int j = 0; j < 2; j += ii) {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
TestLoadPinnedAfterCall obj = object;
|
||||
if (obj == null) {
|
||||
}
|
||||
counter = 10;
|
||||
for (;;) {
|
||||
synchronized (staticField2) {
|
||||
}
|
||||
int i = 0;
|
||||
for (; i < 2; i += ii) {
|
||||
|
||||
}
|
||||
|
||||
inlined(i, i1, i2);
|
||||
|
||||
if (flag) {
|
||||
staticField3 = 0x42;
|
||||
break;
|
||||
}
|
||||
try {
|
||||
not_inlined();
|
||||
if (flag2) {
|
||||
break;
|
||||
}
|
||||
} catch (Throwable throwable) {
|
||||
if (a == obj.staticField1) {
|
||||
staticField4 = 0x42;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (a2 == obj.staticField1) {
|
||||
staticField4 = 0x42;
|
||||
}
|
||||
}
|
||||
|
||||
private static void inlined(int i, int j, int k) {
|
||||
if (i == 2) {
|
||||
if (j == 2) {
|
||||
staticField3 = 0x42;
|
||||
}
|
||||
if (k == 2) {
|
||||
staticField3 = 0x42;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int counter = 0;
|
||||
private static void not_inlined() {
|
||||
counter--;
|
||||
if (counter <= 0) {
|
||||
throw new RuntimeException();
|
||||
}
|
||||
}
|
||||
|
||||
private static class A {
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,167 @@
|
||||
/*
|
||||
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
import jdk.test.lib.process.ProcessTools;
|
||||
import jdk.test.lib.process.OutputAnalyzer;
|
||||
import java.util.*;
|
||||
import java.util.stream.*;
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @bug 8242263
|
||||
* @summary Exercise DiagnoseSyncOnPrimitiveWrappers diagnostic flag
|
||||
* @library /test/lib
|
||||
* @run driver/timeout=180000 SyncOnPrimitiveWrapperTest
|
||||
*/
|
||||
|
||||
public class SyncOnPrimitiveWrapperTest {
|
||||
static final int LOOP_COUNT = 3000;
|
||||
static final int THREAD_COUNT = 2;
|
||||
static String[] fatalTests[];
|
||||
static String[] logTests[];
|
||||
static List<Object> testObjects = new ArrayList<Object>();
|
||||
|
||||
private static final String[] specificFlags[] = {
|
||||
{"-Xint", "-XX:+UseBiasedLocking"},
|
||||
{"-Xint", "-XX:-UseBiasedLocking"},
|
||||
{"-Xcomp", "-XX:TieredStopAtLevel=1", "-XX:+UseBiasedLocking"},
|
||||
{"-Xcomp", "-XX:TieredStopAtLevel=1", "-XX:-UseBiasedLocking"},
|
||||
{"-Xcomp", "-XX:-TieredCompilation", "-XX:-UseBiasedLocking"},
|
||||
{"-Xcomp", "-XX:-TieredCompilation", "-XX:+UseBiasedLocking", "-XX:+UseOptoBiasInlining"},
|
||||
{"-Xcomp", "-XX:-TieredCompilation", "-XX:+UseBiasedLocking", "-XX:-UseOptoBiasInlining"}
|
||||
};
|
||||
|
||||
private static void initTestObjects() {
|
||||
testObjects.add(Character.valueOf('H'));
|
||||
testObjects.add(Boolean.valueOf(true));
|
||||
testObjects.add(Byte.valueOf((byte)0x40));
|
||||
testObjects.add(Short.valueOf((short)0x4000));
|
||||
testObjects.add(Integer.valueOf(0x40000000));
|
||||
testObjects.add(Long.valueOf(0x4000000000000000L));
|
||||
testObjects.add(Float.valueOf(1.20f));
|
||||
testObjects.add(Double.valueOf(1.2345));
|
||||
}
|
||||
|
||||
private static void generateTests() {
|
||||
initTestObjects();
|
||||
String[] commonFatalTestsFlags = {"-XX:+UnlockDiagnosticVMOptions", "-XX:-CreateCoredumpOnCrash", "-XX:DiagnoseSyncOnPrimitiveWrappers=1"};
|
||||
fatalTests = new String[specificFlags.length * testObjects.size()][];
|
||||
for (int i = 0; i < specificFlags.length; i++) {
|
||||
for (int j = 0; j < testObjects.size(); j++) {
|
||||
int index = i * testObjects.size() + j;
|
||||
fatalTests[index] = Stream.of(commonFatalTestsFlags, specificFlags[i], new String[] {"SyncOnPrimitiveWrapperTest$FatalTest", Integer.toString(j)})
|
||||
.flatMap(Stream::of)
|
||||
.toArray(String[]::new);
|
||||
}
|
||||
}
|
||||
String[] commonLogTestsFlags = {"-XX:+UnlockDiagnosticVMOptions", "-XX:DiagnoseSyncOnPrimitiveWrappers=2"};
|
||||
logTests = new String[specificFlags.length][];
|
||||
for (int i = 0; i < specificFlags.length; i++) {
|
||||
logTests[i] = Stream.of(commonLogTestsFlags, specificFlags[i], new String[] {"SyncOnPrimitiveWrapperTest$LogTest"})
|
||||
.flatMap(Stream::of)
|
||||
.toArray(String[]::new);
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
generateTests();
|
||||
for (int i = 0; i < fatalTests.length; i++) {
|
||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(fatalTests[i]);
|
||||
OutputAnalyzer output = ProcessTools.executeProcess(pb);
|
||||
output.shouldContain("fatal error: Synchronizing on object");
|
||||
output.shouldNotContain("synchronization on primitive wrapper did not fail");
|
||||
output.shouldNotHaveExitValue(0);
|
||||
}
|
||||
for (int i = 0; i < logTests.length; i++) {
|
||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(logTests[i]);
|
||||
OutputAnalyzer output = ProcessTools.executeProcess(pb);
|
||||
checkOutput(output);
|
||||
}
|
||||
}
|
||||
|
||||
private static void checkOutput(OutputAnalyzer output) {
|
||||
String out = output.getOutput();
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Character.*"));
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Boolean.*"));
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Byte.*"));
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Short.*"));
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Integer.*"));
|
||||
assertTrue(out.matches("(?s).*Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Long.*"));
|
||||
String[] res = out.split("Synchronizing on object 0[xX][0-9a-fA-F]+ of klass java\\.lang\\.Float\\R");
|
||||
assertTrue(res.length - 1 == (LOOP_COUNT * THREAD_COUNT + 1), res.length - 1);
|
||||
}
|
||||
|
||||
private static void assertTrue(boolean condition) {
|
||||
if (!condition) {
|
||||
throw new RuntimeException("No synchronization matches");
|
||||
}
|
||||
}
|
||||
|
||||
private static void assertTrue(boolean condition, int count) {
|
||||
if (!condition) {
|
||||
throw new RuntimeException("Synchronization count was " + count);
|
||||
}
|
||||
}
|
||||
|
||||
static class FatalTest {
|
||||
public static void main(String[] args) throws Exception {
|
||||
initTestObjects();
|
||||
synchronized (testObjects.get(Integer.valueOf(args[0]))) {
|
||||
throw new RuntimeException("synchronization on primitive wrapper did not fail");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class LogTest implements Runnable {
|
||||
private static long sharedCounter = 0L;
|
||||
private static Float sharedLock1 = 0.0f;
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
initTestObjects();
|
||||
for (Object obj : testObjects) {
|
||||
synchronized (obj) {
|
||||
sharedCounter++;
|
||||
}
|
||||
}
|
||||
|
||||
LogTest test = new LogTest();
|
||||
Thread[] threads = new Thread[THREAD_COUNT];
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
threads[i] = new Thread(test);
|
||||
threads[i].start();
|
||||
}
|
||||
for (Thread t : threads) {
|
||||
t.join();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
for (int i = 0; i < LOOP_COUNT; i++) {
|
||||
synchronized (sharedLock1) {
|
||||
sharedCounter++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -61,6 +61,7 @@ import com.sun.jdi.request.EventRequestManager;
|
||||
* @test GetObjectLockCount.java
|
||||
* @bug 8036666
|
||||
* @summary verify jvm returns correct lock recursion count
|
||||
* @requires vm.jvmti
|
||||
* @run compile -g RecursiveObjectLock.java
|
||||
* @run main/othervm GetObjectLockCount
|
||||
* @author axel.siebenborn@sap.com
|
||||
|
||||
@ -26,6 +26,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI AddModuleExports and AddModuleOpens API
|
||||
* @requires vm.jvmti
|
||||
* @compile AddModuleExportsAndOpensTest.java
|
||||
* @run main/othervm/native -agentlib:AddModuleExportsAndOpensTest MyPackage.AddModuleExportsAndOpensTest
|
||||
*/
|
||||
|
||||
@ -26,6 +26,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI AddModuleReads API
|
||||
* @requires vm.jvmti
|
||||
* @compile AddModuleReadsTest.java
|
||||
* @run main/othervm/native -agentlib:AddModuleReadsTest MyPackage.AddModuleReadsTest
|
||||
*/
|
||||
|
||||
@ -26,6 +26,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Basic test for JVMTI AddModuleUses and AddModuleProvides
|
||||
* @requires vm.jvmti
|
||||
* @build java.base/java.lang.TestProvider
|
||||
* java.base/jdk.internal.test.TestProviderImpl
|
||||
* @compile AddModuleUsesAndProvidesTest.java
|
||||
|
||||
@ -25,6 +25,7 @@
|
||||
* @test
|
||||
* @bug 8212159
|
||||
* @summary Generate compiled method load events without crashing
|
||||
* @requires vm.jvmti
|
||||
* @run main/othervm/native -agentlib:CompiledZombie -Xcomp -XX:ReservedCodeCacheSize=50m Zombie
|
||||
**/
|
||||
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
* @bug 8193369
|
||||
* @summary Tests that all FieldAccess and FieldModification notifications
|
||||
are generated.
|
||||
* @requires vm.jvmti
|
||||
* @compile FieldAccessWatch.java
|
||||
* @run main/othervm/native -agentlib:FieldAccessWatch FieldAccessWatch
|
||||
*/
|
||||
|
||||
@ -25,6 +25,7 @@
|
||||
* @test
|
||||
* @bug 8222072
|
||||
* @summary Send CompiledMethodLoad events only to the environment requested it with GenerateEvents
|
||||
* @requires vm.jvmti
|
||||
* @compile GenerateEventsTest.java
|
||||
* @run main/othervm/native -agentlib:GenerateEvents1 -agentlib:GenerateEvents2 MyPackage.GenerateEventsTest
|
||||
*/
|
||||
|
||||
@ -27,6 +27,7 @@
|
||||
* @test
|
||||
* @bug 8216324
|
||||
* @summary GetClassMethods is confused by the presence of default methods in super interfaces
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @compile OverpassMethods.java
|
||||
* @run main/othervm/native -agentlib:OverpassMethods OverpassMethods
|
||||
|
||||
@ -25,7 +25,7 @@
|
||||
* @test
|
||||
* @bug 8080406
|
||||
* @summary VM_GetOrSetLocal doesn't check local slot type against requested type
|
||||
*
|
||||
* @requires vm.jvmti
|
||||
* @compile GetLocalVars.java
|
||||
* @run main/othervm/native -Xcomp -agentlib:GetLocalVars GetLocalVars
|
||||
*/
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI GetAllModules API
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @run main/othervm/native -agentlib:JvmtiGetAllModulesTest JvmtiGetAllModulesTest
|
||||
*
|
||||
|
||||
@ -26,6 +26,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI GetNamedModule API
|
||||
* @requires vm.jvmti
|
||||
* @compile GetNamedModuleTest.java
|
||||
* @run main/othervm/native -agentlib:GetNamedModuleTest MyPackage.GetNamedModuleTest
|
||||
*/
|
||||
|
||||
@ -30,6 +30,7 @@ import jdk.test.lib.process.ProcessTools;
|
||||
* @test
|
||||
* @bug 8075030
|
||||
* @summary JvmtiEnv::GetObjectSize reports incorrect java.lang.Class instance size
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @modules java.base/jdk.internal.misc
|
||||
* java.compiler
|
||||
|
||||
@ -33,6 +33,7 @@
|
||||
* java.management
|
||||
* jdk.internal.jvmstat/sun.jvmstat.monitor
|
||||
* @requires vm.bits == 64
|
||||
* @requires vm.jvmti
|
||||
* @build GetObjectSizeOverflowAgent
|
||||
* @run driver ClassFileInstaller GetObjectSizeOverflowAgent
|
||||
* @run main GetObjectSizeOverflow
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
* @test
|
||||
* @bug 8185164
|
||||
* @summary Checks that a contended monitor does not show up in the list of owned monitors
|
||||
* @requires vm.jvmti
|
||||
* @compile GetOwnedMonitorInfoTest.java
|
||||
* @run main/othervm/native -agentlib:GetOwnedMonitorInfoTest GetOwnedMonitorInfoTest
|
||||
*/
|
||||
|
||||
@ -27,6 +27,7 @@
|
||||
* @summary Test JVMTI's GetOwnedMonitorInfo with scalar replaced objects and eliminated locks on stack (optimizations based on escape analysis).
|
||||
* @comment Without RFE 8227745 escape analysis needs to be switched off to pass the test. For the implementation of RFE 8227745 it serves as a regression test.
|
||||
* @requires (vm.compMode != "Xcomp" & vm.compiler2.enabled)
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @compile GetOwnedMonitorInfoWithEATest.java
|
||||
* @run main/othervm/native
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
* @test
|
||||
* @bug 8153629
|
||||
* @summary Need to cover JVMTI's GetOwnedMonitorStackDepthInfo function
|
||||
* @requires vm.jvmti
|
||||
* @compile GetOwnedMonitorStackDepthInfoTest.java
|
||||
* @run main/othervm/native -agentlib:GetOwnedMonitorStackDepthInfoTest GetOwnedMonitorStackDepthInfoTest
|
||||
*/
|
||||
|
||||
@ -27,6 +27,7 @@
|
||||
* @summary Test JVMTI's GetOwnedMonitorStackDepthInfo with scalar replaced objects and eliminated locks on stack (optimizations based on escape analysis).
|
||||
* @comment Without RFE 8227745 escape analysis needs to be switched off to pass the test. For the implementation of RFE 8227745 it serves as a regression test.
|
||||
* @requires (vm.compMode != "Xcomp" & vm.compiler2.enabled)
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @compile GetOwnedMonitorStackDepthInfoWithEATest.java
|
||||
* @run main/othervm/native
|
||||
|
||||
@ -25,6 +25,7 @@
|
||||
* @test
|
||||
* @bug 8203329
|
||||
* @summary Verifies the JVMTI GetSystemProperty API returns the updated java.vm.info value
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @run main/othervm/native -agentlib:JvmtiGetSystemPropertyTest JvmtiGetSystemPropertyTest
|
||||
*
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
* @test
|
||||
* @bug 8242428
|
||||
* @summary Verifies JVMTI GetThreadListStackTraces API with thread_count = 1
|
||||
* @requires vm.jvmti
|
||||
* @library /test/lib
|
||||
* @run main/othervm/native -agentlib:OneGetThreadListStackTraces OneGetThreadListStackTraces
|
||||
*
|
||||
|
||||
@ -28,6 +28,7 @@ package MyPackage;
|
||||
* @test
|
||||
* @build Frame HeapMonitor
|
||||
* @summary Verifies the JVMTI Heap Monitor interval when allocating arrays.
|
||||
* @requires vm.jvmti
|
||||
* @compile HeapMonitorArrayAllSampledTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorArrayAllSampledTest
|
||||
*/
|
||||
|
||||
@ -29,6 +29,7 @@ import java.util.List;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies if turning off the event notification stops events.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorEventOnOffTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorEventOnOffTest
|
||||
|
||||
@ -30,5 +30,6 @@ package MyPackage;
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorGCTest.java
|
||||
* @requires vm.gc == "Parallel" | vm.gc == "null"
|
||||
* @requires vm.jvmti
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest -XX:+UseParallelGC MyPackage.HeapMonitorGCTest
|
||||
*/
|
||||
|
||||
@ -30,5 +30,6 @@ package MyPackage;
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorGCTest.java
|
||||
* @requires vm.gc == "Serial" | vm.gc == "null"
|
||||
* @requires vm.jvmti
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest -XX:+UseSerialGC MyPackage.HeapMonitorGCTest
|
||||
*/
|
||||
|
||||
@ -32,6 +32,7 @@ import java.util.List;
|
||||
* @summary Verifies the default GC with the Heap Monitor event system.
|
||||
* @compile HeapMonitorGCTest.java
|
||||
* @requires vm.gc == "G1" | vm.gc == "null"
|
||||
* @requires vm.jvmti
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorGCTest
|
||||
*/
|
||||
|
||||
|
||||
@ -27,6 +27,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI SetHeapSamplingInterval returns an illegal argument for negative ints.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorIllegalArgumentTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorIllegalArgumentTest
|
||||
|
||||
@ -27,6 +27,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor does not always sample the first object.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorInitialAllocationTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorInitialAllocationTest
|
||||
|
||||
@ -27,6 +27,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor using the interpreter.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest -Xint MyPackage.HeapMonitorTest 10
|
||||
|
||||
@ -27,6 +27,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor using the interpreter.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorStatObjectCorrectnessTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest -Xint MyPackage.HeapMonitorStatObjectCorrectnessTest
|
||||
|
||||
@ -29,6 +29,7 @@ import java.util.List;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor API when allocating a multi-array.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorMultiArrayTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorMultiArrayTest
|
||||
|
||||
@ -27,6 +27,7 @@ package MyPackage;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor does not work without the required capability.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorNoCapabilityTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorNoCapabilityTest
|
||||
|
||||
@ -29,6 +29,7 @@ import java.util.List;
|
||||
/**
|
||||
* @test
|
||||
* @summary Verifies the JVMTI Heap Monitor API does not do infinite recursion.
|
||||
* @requires vm.jvmti
|
||||
* @build Frame HeapMonitor
|
||||
* @compile HeapMonitorRecursiveTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorRecursiveTest
|
||||
|
||||
@ -28,6 +28,7 @@ package MyPackage;
|
||||
* @test
|
||||
* @build Frame HeapMonitor
|
||||
* @summary Verifies the JVMTI Heap Monitor interval when allocating arrays.
|
||||
* @requires vm.jvmti
|
||||
* @compile HeapMonitorStatArrayCorrectnessTest.java
|
||||
* @run main/othervm/native -agentlib:HeapMonitorTest MyPackage.HeapMonitorStatArrayCorrectnessTest
|
||||
*/
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user