Merge remote-tracking branch 'origin/master' into JDK-object-monitor-experiments-reconcile-enter-reenter-attempt5

Author: Anton Artemov
Date:   2025-08-25 10:36:44 +01:00
Commit: edd82e044e
260 changed files with 3975 additions and 3240 deletions


@ -1451,10 +1451,10 @@ of a cross-compiling toolchain and a sysroot environment which can
easily be used together with the <code>--with-devkit</code> configure
option to cross compile the JDK. On Linux/x86_64, the following
command:</p>
<pre><code>bash configure --with-devkit=&lt;devkit-path&gt; --openjdk-target=ppc64-linux-gnu &amp;&amp; make</code></pre>
<p>will configure and build the JDK for Linux/ppc64 assuming that
<code>&lt;devkit-path&gt;</code> points to a Linux/x86_64 to Linux/ppc64
devkit.</p>
<pre><code>bash configure --with-devkit=&lt;devkit-path&gt; --openjdk-target=ppc64le-linux-gnu &amp;&amp; make</code></pre>
<p>will configure and build the JDK for Linux/ppc64le assuming that
<code>&lt;devkit-path&gt;</code> points to a Linux/x86_64 to
Linux/ppc64le devkit.</p>
<p>Devkits can be created from the <code>make/devkit</code> directory by
executing:</p>
<pre><code>make [ TARGETS=&quot;&lt;TARGET_TRIPLET&gt;+&quot; ] [ BASE_OS=&lt;OS&gt; ] [ BASE_OS_VERSION=&lt;VER&gt; ]</code></pre>
@ -1481,10 +1481,10 @@ following targets are known to work:</p>
<td>arm-linux-gnueabihf</td>
</tr>
<tr class="even">
<td>ppc64-linux-gnu</td>
<td>ppc64le-linux-gnu</td>
</tr>
<tr class="odd">
<td>ppc64le-linux-gnu</td>
<td>riscv64-linux-gnu</td>
</tr>
<tr class="even">
<td>s390x-linux-gnu</td>


@ -1258,11 +1258,11 @@ toolchain and a sysroot environment which can easily be used together with the
following command:
```
bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64-linux-gnu && make
bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64le-linux-gnu && make
```
will configure and build the JDK for Linux/ppc64 assuming that `<devkit-path>`
points to a Linux/x86_64 to Linux/ppc64 devkit.
will configure and build the JDK for Linux/ppc64le assuming that `<devkit-path>`
points to a Linux/x86_64 to Linux/ppc64le devkit.
Devkits can be created from the `make/devkit` directory by executing:
@ -1281,8 +1281,8 @@ at least the following targets are known to work:
| x86_64-linux-gnu |
| aarch64-linux-gnu |
| arm-linux-gnueabihf |
| ppc64-linux-gnu |
| ppc64le-linux-gnu |
| riscv64-linux-gnu |
| s390x-linux-gnu |
`BASE_OS` must be one of `OL` for Oracle Enterprise Linux or `Fedora`. If the


@ -39,7 +39,7 @@
#
# make TARGETS="aarch64-linux-gnu" BASE_OS=Fedora
# or
# make TARGETS="arm-linux-gnueabihf ppc64-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=17
# make TARGETS="arm-linux-gnueabihf ppc64le-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=17
#
# to build several devkits for a specific OS version at once.
# You can find the final results under ../../build/devkit/result/<host>-to-<target>
@ -50,7 +50,7 @@
# makefile again for cross compilation. Ex:
#
# PATH=$PWD/../../build/devkit/result/x86_64-linux-gnu-to-x86_64-linux-gnu/bin:$PATH \
# make TARGETS="arm-linux-gnueabihf,ppc64-linux-gnu" BASE_OS=Fedora
# make TARGETS="arm-linux-gnueabihf ppc64le-linux-gnu" BASE_OS=Fedora
#
# This is the makefile which iterates over all host and target platforms.
#


@ -69,15 +69,26 @@ else ifeq ($(BASE_OS), Fedora)
ifeq ($(BASE_OS_VERSION), )
BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
endif
ifeq ($(filter aarch64 armhfp ppc64le riscv64 s390x x86_64, $(ARCH)), )
$(error Only "aarch64 armhfp ppc64le riscv64 s390x x86_64" architectures are supported for Fedora, but "$(ARCH)" was requested)
endif
ifeq ($(ARCH), riscv64)
ifeq ($(filter 38 39 40 41, $(BASE_OS_VERSION)), )
$(error Only Fedora 38-41 are supported for "$(ARCH)", but Fedora $(BASE_OS_VERSION) was requested)
endif
BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
else
LATEST_ARCHIVED_OS_VERSION := 35
ifeq ($(filter x86_64 armhfp, $(ARCH)), )
LATEST_ARCHIVED_OS_VERSION := 36
ifeq ($(filter aarch64 armhfp x86_64, $(ARCH)), )
FEDORA_TYPE := fedora-secondary
else
FEDORA_TYPE := fedora/linux
endif
ifeq ($(ARCH), armhfp)
ifneq ($(BASE_OS_VERSION), 36)
$(error Fedora 36 is the last release supporting "armhfp", but Fedora $(BASE_OS_VERSION) was requested)
endif
endif
NOT_ARCHIVED := $(shell [ $(BASE_OS_VERSION) -gt $(LATEST_ARCHIVED_OS_VERSION) ] && echo true)
ifeq ($(NOT_ARCHIVED),true)
BASE_URL := https://dl.fedoraproject.org/pub/$(FEDORA_TYPE)/releases/$(BASE_OS_VERSION)/Everything/$(ARCH)/os/Packages/
@ -464,7 +475,7 @@ ifeq ($(ARCH), armhfp)
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --with-float=hard
endif
ifneq ($(filter riscv64 ppc64 ppc64le s390x, $(ARCH)), )
ifneq ($(filter riscv64 ppc64le s390x, $(ARCH)), )
# We only support 64-bit on these platforms anyway
CONFIG += --disable-multilib
endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -101,9 +101,12 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
*hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
return hf;
} else {
// We need to re-read fp out of the frame because it may be an oop and we might have
// had a safepoint in finalize_freeze, after constructing f.
fp = *(intptr_t**)(f.sp() - frame::sender_sp_offset);
// For a compiled frame we need to re-read fp out of the frame because it may be an
// oop and we might have had a safepoint in finalize_freeze, after constructing f.
// For stub/native frames the value is not used while frozen, and will be constructed again
// when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
// help with debugging, particularly when inspecting frames and identifying invalid accesses.
fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
int fsize = FKind::size(f);
sp = caller.unextended_sp() - fsize;
@ -192,6 +195,11 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
}
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
intptr_t* fp_addr = sp - frame::sender_sp_offset;
*fp_addr = badAddressVal;
}
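The comment above explains why, for stub/native frames, the saved-fp slot is poisoned while the frame is frozen and rebuilt when thawing. A minimal stand-alone sketch of that idea; `kBadAddressVal` and `kSenderSpOffset` are illustrative stand-ins for HotSpot's `badAddressVal` and `frame::sender_sp_offset`, not the real constants:

```
#include <cstdint>

// Assumed stand-ins for HotSpot's badAddressVal and frame::sender_sp_offset.
constexpr uintptr_t kBadAddressVal  = 0xbaadbabeu;
constexpr int       kSenderSpOffset = 2;

// Poison the saved-fp slot of a frozen stub/native frame so that any accidental
// use of it while frozen shows up as an obviously invalid pointer.
inline void patch_fp_unused(intptr_t* sp) {
  intptr_t* fp_addr = sp - kSenderSpOffset;          // same slot patch_pd_unused writes
  *fp_addr = static_cast<intptr_t>(kBadAddressVal);  // the real fp is rebuilt when thawing
}
```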
//////// Thaw
// Fast path


@ -172,9 +172,9 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
if (expand_call) {
assert(pre_val != c_rarg1, "smashed arg");
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
}
__ pop(saved, sp);
@ -753,7 +753,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
__ bind(runtime);
__ push_call_clobbered_registers();
__ load_parameter(0, pre_val);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
__ pop_call_clobbered_registers();
__ bind(done);


@ -2426,13 +2426,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register hdr = op->hdr_opr()->as_pointer_register();
Register lock = op->lock_opr()->as_pointer_register();
if (LockingMode == LM_MONITOR) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj);
}
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
if (op->info() != nullptr) {


@ -177,18 +177,16 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len,
}
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
Label done, fast_lock, fast_lock_done;
int null_check_offset = 0;
const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
assert_different_registers(hdr, obj, disp_hdr, tmp2);
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
const ByteSize obj_offset = BasicObjectLock::obj_offset();
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
// save object being locked into the BasicObjectLock
str(obj, Address(disp_hdr, obj_offset));
str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
null_check_offset = offset();
@ -199,95 +197,29 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
b(slow_case, ne);
}
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
if (LockingMode == LM_LIGHTWEIGHT) {
Register t1 = disp_hdr; // Needs saving, probably
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
lightweight_lock(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
} else if (LockingMode == LM_LEGACY) {
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
// That would be acceptable as either CAS or slow case path is taken in that case.
// Must be the first instruction here, because implicit null check relies on it
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
tst(hdr, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive locking
// See comments in InterpreterMacroAssembler::lock_object for
// explanations on the fast recursive locking check.
// -1- test low 2 bits
movs(tmp2, AsmOperand(hdr, lsl, 30));
// -2- test (hdr - SP) if the low two bits are 0
sub(tmp2, hdr, SP, eq);
movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
// If still 'eq' then recursive locking OK
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
str(tmp2, Address(disp_hdr, mark_offset));
b(fast_lock_done, eq);
// else need slow case
b(slow_case);
bind(fast_lock);
// Save previous object header in BasicLock structure and update the header
str(hdr, Address(disp_hdr, mark_offset));
cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);
bind(fast_lock_done);
}
bind(done);
Register t1 = disp_hdr; // Needs saving, probably
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
lightweight_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
return null_check_offset;
}
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
assert_different_registers(hdr, obj, disp_hdr, Rtemp);
Register tmp2 = Rtemp;
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
const ByteSize obj_offset = BasicObjectLock::obj_offset();
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
Label done;
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
if (LockingMode == LM_LIGHTWEIGHT) {
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
ldr(obj, Address(disp_hdr, obj_offset));
Register t1 = disp_hdr; // Needs saving, probably
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
Register t1 = disp_hdr; // Needs saving, probably
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
lightweight_unlock(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
slow_case);
// Success: Fall through
} else if (LockingMode == LM_LEGACY) {
// Load displaced header and object from the lock
ldr(hdr, Address(disp_hdr, mark_offset));
// If hdr is null, we've got recursive locking and there's nothing more to do
cbz(hdr, done);
// load object
ldr(obj, Address(disp_hdr, obj_offset));
// Restore the object header
cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);
}
bind(done);
lightweight_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
}
#ifndef PRODUCT


@ -81,7 +81,7 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
assert(VM_Version::supports_ldrex(), "unsupported, yet?");
assert_different_registers(Roop, Rbox, Rscratch, Rscratch2);
Label fast_lock, done;
Label done;
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(Rscratch, Roop);
@ -90,43 +90,10 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
b(done, ne);
}
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
// Success: set Z
cmp(Roop, Roop);
} else if (LockingMode == LM_LEGACY) {
Register Rmark = Rscratch2;
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
tst(Rmark, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive lock
// See comments in InterpreterMacroAssembler::lock_object for
// explanations on the fast recursive locking check.
// -1- test low 2 bits
movs(Rscratch, AsmOperand(Rmark, lsl, 30));
// -2- test (hdr - SP) if the low two bits are 0
sub(Rscratch, Rmark, SP, eq);
movs(Rscratch, AsmOperand(Rscratch, lsr, exact_log2(os::vm_page_size())), eq);
// If still 'eq' then recursive locking OK
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8153107)
str(Rscratch, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
b(done);
bind(fast_lock);
str(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
bool allow_fallthrough_on_failure = true;
bool one_shot = true;
cas_for_lock_acquire(Rmark, Rbox, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
}
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: set Z
bind(done);
// At this point flags are set as follows:
@ -140,29 +107,12 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
Label done;
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: Set Z
// Fall through
cmp(Roop, Roop); // Success: Set Z
// Fall through
} else if (LockingMode == LM_LEGACY) {
Register Rmark = Rscratch2;
// Find the lock address and load the displaced header from the stack.
ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
// If hdr is null, we've got recursive locking and there's nothing more to do
cmp(Rmark, 0);
b(done, eq);
// Restore the object header
bool allow_fallthrough_on_failure = true;
bool one_shot = true;
cas_for_lock_release(Rbox, Rmark, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
}
bind(done);
// At this point flags are set as follows:


@ -60,6 +60,10 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
Unimplemented();
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
Unimplemented();
}
inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
Unimplemented();
}


@ -888,105 +888,30 @@ void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Reg
void InterpreterMacroAssembler::lock_object(Register Rlock) {
assert(Rlock == R1, "the second argument");
if (LockingMode == LM_MONITOR) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
} else {
Label done;
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
Label done, slow_case;
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
// Load object pointer
ldr(Robj, Address(Rlock, BasicObjectLock::obj_offset()));
Label already_locked, slow_case;
// Load object pointer
ldr(Robj, Address(Rlock, obj_offset));
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(R0, Robj);
ldrb(R0, Address(R0, Klass::misc_flags_offset()));
tst(R0, KlassFlags::_misc_is_value_based_class);
b(slow_case, ne);
}
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
b(done);
} else if (LockingMode == LM_LEGACY) {
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
// That would be acceptable as either CAS or slow case path is taken in that case.
// Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
// loads are satisfied from a store queue if performed on the same processor).
assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
// Test if object is already locked
tst(Rmark, markWord::unlocked_value);
b(already_locked, eq);
// Save old object->mark() into BasicLock's displaced header
str(Rmark, Address(Rlock, mark_offset));
cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
b(done);
// If we got here that means the object is locked by either the calling thread or another thread.
bind(already_locked);
// Handling of locked objects: recursive locks and slow case.
// Fast check for recursive lock.
//
// Can apply the optimization only if this is a stack lock
// allocated in this thread. For efficiency, we can focus on
// recently allocated stack locks (instead of reading the stack
// base and checking whether 'mark' points inside the current
// thread stack):
// 1) (mark & 3) == 0
// 2) SP <= mark < SP + os::pagesize()
//
// Warning: SP + os::pagesize can overflow the stack base. We must
// neither apply the optimization for an inflated lock allocated
// just above the thread stack (this is why condition 1 matters)
// nor apply the optimization if the stack lock is inside the stack
// of another thread. The latter is avoided even in case of overflow
// because we have guard pages at the end of all stacks. Hence, if
// we go over the stack base and hit the stack of another thread,
// this should not be in a writeable area that could contain a
// stack lock allocated by that thread. As a consequence, a stack
// lock less than page size away from SP is guaranteed to be
// owned by the current thread.
//
// Note: assuming SP is aligned, we can check the low bits of
// (mark-SP) instead of the low bits of mark. In that case,
// assuming page size is a power of 2, we can merge the two
// conditions into a single test:
// => ((mark - SP) & (3 - os::pagesize())) == 0
// (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
// Check independently the low bits and the distance to SP.
// -1- test low 2 bits
movs(R0, AsmOperand(Rmark, lsl, 30));
// -2- test (mark - SP) if the low two bits are 0
sub(R0, Rmark, SP, eq);
movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
// If still 'eq' then recursive locking OK: store 0 into lock record
str(R0, Address(Rlock, mark_offset), eq);
b(done, eq);
}
bind(slow_case);
// Call the runtime routine for slow case
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
bind(done);
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(R0, Robj);
ldrb(R0, Address(R0, Klass::misc_flags_offset()));
tst(R0, KlassFlags::_misc_is_value_based_class);
b(slow_case, ne);
}
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
b(done);
bind(slow_case);
// Call the runtime routine for slow case
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
bind(done);
}
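The removed comment above reduces the two recursion conditions (low two bits of the mark are clear, and the mark lies within one page above SP) to a single mask test. A small sketch of that arithmetic, assuming `page_size` is a power of two and SP is at least 4-byte aligned:

```
#include <cstdint>

// Sketch of the merged test ((mark - SP) & (3 - page_size)) == 0 derived in the
// comment above. It holds exactly when the low two bits of the mark are clear
// and SP <= mark < SP + page_size, i.e. the mark points at a stack lock that
// this thread allocated recently.
inline bool looks_like_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                            uintptr_t page_size /* power of two */) {
  return ((mark - sp) & (3 - page_size)) == 0;
}
```

With `page_size` = 4096 the mask `3 - page_size` is `0x...fffff003`, so a single AND checks both the low bits and the distance to SP.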
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
@ -997,65 +922,39 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
assert(Rlock == R0, "the first argument");
if (LockingMode == LM_MONITOR) {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
} else {
Label done, slow_case;
Label done, slow_case;
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, Rtemp);
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, Rtemp);
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const Register Rzero = zero_register(Rtemp);
const Register Rzero = zero_register(Rtemp);
// Load oop into Robj
ldr(Robj, Address(Rlock, obj_offset));
// Load oop into Robj
ldr(Robj, Address(Rlock, obj_offset));
// Free entry
str(Rzero, Address(Rlock, obj_offset));
// Free entry
str(Rzero, Address(Rlock, obj_offset));
// Check for non-symmetric locking. This is allowed by the spec and the interpreter
// must handle it.
ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset()));
sub(Rtemp, Rtemp, oopSize);
ldr(Rtemp, Address(Rthread, Rtemp));
cmpoop(Rtemp, Robj);
b(slow_case, ne);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
b(done);
// Check for non-symmetric locking. This is allowed by the spec and the interpreter
// must handle it.
ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset()));
sub(Rtemp, Rtemp, oopSize);
ldr(Rtemp, Address(Rthread, Rtemp));
cmpoop(Rtemp, Robj);
b(slow_case, ne);
bind(slow_case);
// Call the runtime routine for slow case.
str(Robj, Address(Rlock, obj_offset)); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
b(done);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
ldr(Rmark, Address(Rlock, mark_offset));
// Test for recursion (zero mark in BasicLock)
cbz(Rmark, done);
bool allow_fallthrough_on_failure = true;
cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);
b(done, eq);
}
bind(slow_case);
// Call the runtime routine for slow case.
str(Robj, Address(Rlock, obj_offset)); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
bind(done);
}
bind(done);
}
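The non-symmetric-locking check above only takes the fast path when the object being unlocked is the one on top of the current thread's lock stack; everything else goes to the runtime. A tiny model of that check; the `LockStack` type here is an illustration, not HotSpot's lock-stack implementation:

```
#include <vector>

// Illustrative per-thread lock stack of object addresses.
using LockStack = std::vector<const void*>;

// monitorexit may name an object that is not the most recently locked one
// ("non-symmetric" locking, which the spec allows). The fast path only handles
// the top-of-stack case; anything else falls back to the slow path.
inline bool fast_unlock_applies(const LockStack& ls, const void* obj) {
  return !ls.empty() && ls.back() == obj;
}
```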
// Test ImethodDataPtr. If it is null, continue at the specified label


@ -1758,7 +1758,6 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
#ifdef ASSERT
@ -1816,7 +1815,6 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Re
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
#ifdef ASSERT


@ -1139,41 +1139,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Remember the handle for the unlocking code
__ mov(sync_handle, R1);
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime lock fast");
__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
log_trace(fastlock)("SharedRuntime lock fast");
__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
// Fall through to lock_done
} else if (LockingMode == LM_LEGACY) {
const Register mark = tmp;
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
// That would be acceptable as either CAS or slow case path is taken in that case
__ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
__ sub(disp_hdr, FP, lock_slot_fp_offset);
__ tst(mark, markWord::unlocked_value);
__ b(fast_lock, ne);
// Check for recursive lock
// See comments in InterpreterMacroAssembler::lock_object for
// explanations on the fast recursive locking check.
// Check independently the low bits and the distance to SP
// -1- test low 2 bits
__ movs(Rtemp, AsmOperand(mark, lsl, 30));
// -2- test (hdr - SP) if the low two bits are 0
__ sub(Rtemp, mark, SP, eq);
__ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
// If still 'eq' then recursive locking OK
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
__ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
__ b(lock_done, eq);
__ b(slow_lock);
__ bind(fast_lock);
__ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
__ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);
}
__ bind(lock_done);
}
@ -1226,21 +1195,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label slow_unlock, unlock_done;
if (method->is_synchronized()) {
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime unlock fast");
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
// Fall through
} else if (LockingMode == LM_LEGACY) {
// See C1_MacroAssembler::unlock_object() for more comments
__ ldr(sync_obj, Address(sync_handle));
log_trace(fastlock)("SharedRuntime unlock fast");
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
// Fall through
// See C1_MacroAssembler::unlock_object() for more comments
__ ldr(R2, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
__ cbz(R2, unlock_done);
__ cas_for_lock_release(disp_hdr, R2, sync_obj, Rtemp, slow_unlock);
}
__ bind(unlock_done);
}


@ -228,11 +228,7 @@ int LIR_Assembler::emit_unwind_handler() {
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::R4_opr);
stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
if (LockingMode == LM_MONITOR) {
__ b(*stub->entry());
} else {
__ unlock_object(R5, R6, R4, *stub->entry());
}
__ unlock_object(R5, R6, R4, *stub->entry());
__ bind(*stub->continuation());
}
@ -2618,44 +2614,20 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
// Obj may not be an oop.
if (op->code() == lir_lock) {
MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
if (LockingMode != LM_MONITOR) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// Add debug info for NullPointerException only if one is possible.
if (op->info() != nullptr) {
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
explicit_null_check(obj, op->info());
} else {
add_debug_info_for_null_check_here(op->info());
}
}
__ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
} else {
// always do slow locking
// note: The slow locking code could be inlined here, however if we use
// slow locking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow locking code is the same in either case which simplifies
// debugging.
if (op->info() != nullptr) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// Add debug info for NullPointerException only if one is possible.
if (op->info() != nullptr) {
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
explicit_null_check(obj, op->info());
} else {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj);
}
__ b(*op->stub()->entry());
}
__ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
} else {
assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
if (LockingMode != LM_MONITOR) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
} else {
// always do slow unlocking
// note: The slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging.
__ b(*op->stub()->entry());
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
}
__ bind(*op->stub()->continuation());
}


@ -82,59 +82,13 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// Save object being locked into the BasicObjectLock...
std(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
} else if (LockingMode == LM_LEGACY) {
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(Rscratch, Roop);
lbz(Rscratch, in_bytes(Klass::misc_flags_offset()), Rscratch);
testbitdi(CR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
bne(CR0, slow_int);
}
// ... and mark it unlocked.
ori(Rmark, Rmark, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
// Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
cmpxchgd(/*flag=*/CR0,
/*current_value=*/Rscratch,
/*compare_value=*/Rmark,
/*exchange_value=*/Rbox,
/*where=*/Roop/*+0==mark_offset_in_bytes*/,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg,
&cas_failed,
/*check without membar and ldarx first*/true);
// If compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done.
} else {
assert(false, "Unhandled LockingMode:%d", LockingMode);
}
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
b(done);
bind(slow_int);
b(slow_case); // far
if (LockingMode == LM_LEGACY) {
bind(cas_failed);
// We did not find an unlocked object so see if this is a recursive case.
sub(Rscratch, Rscratch, R1_SP);
load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
and_(R0/*==0?*/, Rscratch, R0);
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
bne(CR0, slow_int);
}
bind(done);
if (LockingMode == LM_LEGACY) {
inc_held_monitor_count(Rmark /*tmp*/);
}
}
@ -146,43 +100,17 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
if (LockingMode != LM_LIGHTWEIGHT) {
// Test first if it is a fast recursive unlock.
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
cmpdi(CR0, Rmark, 0);
beq(CR0, done);
}
// Load object.
ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
verify_oop(Roop, FILE_AND_LINE);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(Roop, Rmark, slow_int);
} else if (LockingMode == LM_LEGACY) {
// Check if it is still a lightweight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object.
cmpxchgd(/*flag=*/CR0,
/*current_value=*/R0,
/*compare_value=*/Rbox,
/*exchange_value=*/Rmark,
/*where=*/Roop,
MacroAssembler::MemBarRel,
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&slow_int);
} else {
assert(false, "Unhandled LockingMode:%d", LockingMode);
}
lightweight_unlock(Roop, Rmark, slow_int);
b(done);
bind(slow_int);
b(slow_case); // far
// Done
bind(done);
if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(Rmark /*tmp*/);
}
}


@ -334,6 +334,9 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
#endif
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
}
//////// Thaw
// Fast path


@ -311,7 +311,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
}
// Invoke runtime.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, R16_thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
// Restore to-be-preserved registers.
if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
@ -966,7 +966,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
__ push_frame_reg_args(nbytes_save, R11_tmp1);
// Invoke runtime.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), R0_pre_val, R16_thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), R0_pre_val);
// Restore to-be-preserved registers.
__ pop_frame();


@ -946,121 +946,20 @@ void InterpreterMacroAssembler::leave_jfr_critical_section() {
// object - Address of the object to be locked.
//
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
if (LockingMode == LM_MONITOR) {
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
} else {
// template code (for LM_LEGACY):
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
// }
const Register header = R7_ARG5;
const Register tmp = R8_ARG6;
const Register header = R7_ARG5;
const Register object_mark_addr = R8_ARG6;
const Register current_header = R9_ARG7;
const Register tmp = R10_ARG8;
Label done, slow_case;
Label count_locking, done, slow_case, cas_failed;
assert_different_registers(header, tmp);
assert_different_registers(header, object_mark_addr, current_header, tmp);
lightweight_lock(monitor, object, header, tmp, slow_case);
b(done);
// markWord displaced_header = obj->mark().set_unlocked();
bind(slow_case);
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(monitor, object, header, tmp, slow_case);
b(done);
} else if (LockingMode == LM_LEGACY) {
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(tmp, object);
lbz(tmp, in_bytes(Klass::misc_flags_offset()), tmp);
testbitdi(CR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
bne(CR0, slow_case);
}
// Load markWord from object into header.
ld(header, oopDesc::mark_offset_in_bytes(), object);
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
ori(header, header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
// Initialize the box (Must happen before we update the object mark!).
std(header, mark_offset, monitor);
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
// CmpxchgX sets CR0 to cmpX(current, displaced).
cmpxchgd(/*flag=*/CR0,
/*current_value=*/current_header,
/*compare_value=*/header, /*exchange_value=*/monitor,
/*where=*/object_mark_addr,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg,
&cas_failed,
/*check without membar and ldarx first*/true);
// If the compare-and-exchange succeeded, then we found an unlocked
// object and we have now locked it.
b(count_locking);
bind(cas_failed);
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(nullptr);
// We did not see an unlocked object so try the fast recursive case.
// Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, tmp);
// If condition is true we are done and hence we can store 0 in the displaced
// header indicating it is a recursive lock.
bne(CR0, slow_case);
std(R0/*==0!*/, mark_offset, monitor);
b(count_locking);
}
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter.
bind(slow_case);
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
// }
if (LockingMode == LM_LEGACY) {
b(done);
align(32, 12);
bind(count_locking);
inc_held_monitor_count(current_header /*tmp*/);
}
bind(done);
}
bind(done);
}
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
@ -1071,95 +970,34 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
//
// Throw IllegalMonitorException if object is not locked by current thread.
void InterpreterMacroAssembler::unlock_object(Register monitor) {
if (LockingMode == LM_MONITOR) {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
} else {
const Register object = R7_ARG5;
const Register header = R8_ARG6;
const Register current_header = R10_ARG8;
// template code (for LM_LEGACY):
//
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
// monitor->set_obj(nullptr);
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(monitor);
// }
Label free_slot;
Label slow_case;
const Register object = R7_ARG5;
const Register header = R8_ARG6;
const Register object_mark_addr = R9_ARG7;
const Register current_header = R10_ARG8;
assert_different_registers(object, header, current_header);
Label free_slot;
Label slow_case;
// The object address from the monitor is in object.
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
assert_different_registers(object, header, object_mark_addr, current_header);
lightweight_unlock(object, header, slow_case);
if (LockingMode != LM_LIGHTWEIGHT) {
// Test first if we are in the fast recursive case.
ld(header, in_bytes(BasicObjectLock::lock_offset()) +
BasicLock::displaced_header_offset_in_bytes(), monitor);
b(free_slot);
// If the displaced header is zero, we have a recursive unlock.
cmpdi(CR0, header, 0);
beq(CR0, free_slot); // recursive unlock
}
bind(slow_case);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(nullptr);
Label done;
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
// If we still have a lightweight lock, unlock the object and be done.
// The object address from the monitor is in object.
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(object, header, slow_case);
} else {
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// We have the displaced header in displaced_header. If the lock is still
// lightweight, it will contain the monitor address and we'll store the
// displaced header back into the object's mark word.
// CmpxchgX sets CR0 to cmpX(current, monitor).
cmpxchgd(/*flag=*/CR0,
/*current_value=*/current_header,
/*compare_value=*/monitor, /*exchange_value=*/header,
/*where=*/object_mark_addr,
MacroAssembler::MemBarRel,
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&slow_case);
}
b(free_slot);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(monitor);
// The lock has been converted into a heavy lock and hence
// we need to get into the slow case.
bind(slow_case);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
// }
Label done;
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
// Exchange worked, do monitor->set_obj(nullptr);
align(32, 12);
bind(free_slot);
li(R0, 0);
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(current_header /*tmp*/);
}
bind(done);
}
// Do monitor->set_obj(nullptr);
align(32, 12);
bind(free_slot);
li(R0, 0);
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
bind(done);
}
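The removed LM_LEGACY template comment in `unlock_object` above describes the displaced-header unlock protocol. A hedged C++ rendering of that pseudocode, with a `std::atomic<uintptr_t>` standing in for the object's mark word and the real markWord encoding glossed over:

```
#include <atomic>
#include <cstdint>

struct BasicLockModel { uintptr_t displaced_header; const void* obj; };
struct ObjectModel    { std::atomic<uintptr_t> mark; };  // stand-in for the mark word

// Sketch of the legacy unlock path from the removed comment above. Returns
// false when the slow path (InterpreterRuntime::monitorexit) is required.
inline bool legacy_unlock(ObjectModel* o, BasicLockModel* monitor_lock) {
  uintptr_t displaced = monitor_lock->displaced_header;
  if (displaced == 0) {                  // recursive unlock: nothing to restore
    monitor_lock->obj = nullptr;         // just free the monitor slot
    return true;
  }
  uintptr_t expected = reinterpret_cast<uintptr_t>(monitor_lock);
  if (o->mark.compare_exchange_strong(expected, displaced,
                                      std::memory_order_release)) {
    monitor_lock->obj = nullptr;         // swapped the unlocked mark back in
    return true;
  }
  return false;                          // lock was inflated; take the slow path
}
```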
// Load compiled (i2c) or interpreter entry when calling from interpreted and


@ -2671,238 +2671,6 @@ address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
}
// "The box" is the space on the stack where we copy the object mark.
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
Register temp, Register displaced_header, Register current_header) {
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_lock_lightweight");
assert_different_registers(oop, box, temp, displaced_header, current_header);
Label object_has_monitor;
Label cas_failed;
Label success, failure;
// Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(temp, oop);
lbz(temp, in_bytes(Klass::misc_flags_offset()), temp);
testbitdi(flag, R0, temp, exact_log2(KlassFlags::_misc_is_value_based_class));
bne(flag, failure);
}
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
andi_(temp, displaced_header, markWord::monitor_value);
bne(CR0, object_has_monitor);
if (LockingMode == LM_MONITOR) {
// Set NE to indicate 'failure' -> take slow-path.
crandc(flag, Assembler::equal, flag, Assembler::equal);
b(failure);
} else {
assert(LockingMode == LM_LEGACY, "must be");
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
ori(displaced_header, displaced_header, markWord::unlocked_value);
// Load Compare Value application register.
// Initialize the box. (Must happen before we update the object mark!)
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
// Compare object markWord with mark and if equal exchange scratch1 with object markWord.
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/displaced_header,
/*exchange_value=*/box,
/*where=*/oop,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg,
&cas_failed,
/*check without membar and ldarx first*/true);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// If the compare-and-exchange succeeded, then we found an unlocked
// object and we have now locked it.
b(success);
bind(cas_failed);
// We did not see an unlocked object so try the fast recursive case.
// Check if the owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
load_const_optimized(temp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, temp);
// If condition is true we are done and hence we can store 0 as the
// displaced header in the box, which indicates that it is a recursive lock.
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
if (flag != CR0) {
mcrf(flag, CR0);
}
beq(CR0, success);
b(failure);
}
// Handle existing monitor.
bind(object_has_monitor);
// Try to CAS owner (no owner => current thread's _monitor_owner_id).
addi(temp, displaced_header, in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value);
Register thread_id = displaced_header;
ld(thread_id, in_bytes(JavaThread::monitor_owner_id_offset()), R16_thread);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
/*exchange_value=*/thread_id,
/*where=*/temp,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());
// Store a non-null value into the box.
std(box, BasicLock::displaced_header_offset_in_bytes(), box);
beq(flag, success);
// Check for recursive locking.
cmpd(flag, current_header, thread_id);
bne(flag, failure);
// Current thread already owns the lock. Just increment recursions.
Register recursions = displaced_header;
ld(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
addi(recursions, recursions, 1);
std(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
// flag == EQ indicates success, increment held monitor count if LM_LEGACY is enabled
// flag == NE indicates failure
bind(success);
if (LockingMode == LM_LEGACY) {
inc_held_monitor_count(temp);
}
#ifdef ASSERT
// Check that unlocked label is reached with flag == EQ.
Label flag_correct;
beq(flag, flag_correct);
stop("compiler_fast_lock_object: Flag != EQ");
#endif
bind(failure);
#ifdef ASSERT
// Check that slow_path label is reached with flag == NE.
bne(flag, flag_correct);
stop("compiler_fast_lock_object: Flag != NE");
bind(flag_correct);
#endif
}
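The inflated-monitor branch above CASes the monitor's owner from null to the current thread's id and treats an owner equal to that id as a recursive enter. A simplified sketch; `MiniMonitor` and its fields are assumptions mirroring the fields referenced above, not the real `ObjectMonitor` layout:

```
#include <atomic>
#include <cstdint>

struct MiniMonitor {
  std::atomic<intptr_t> owner{0};   // 0 means unowned
  intptr_t recursions = 0;
};

// Sketch of the monitor-enter fast path: install our thread id as the owner,
// or, if we already own the monitor, just count the recursive enter.
inline bool monitor_try_enter(MiniMonitor* m, intptr_t thread_id) {
  intptr_t expected = 0;
  if (m->owner.compare_exchange_strong(expected, thread_id,
                                       std::memory_order_acquire)) {
    return true;                    // acquired
  }
  if (expected == thread_id) {      // recursive enter by the same thread
    m->recursions++;
    return true;
  }
  return false;                     // contended: fall back to the slow path
}
```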
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
Register temp, Register displaced_header, Register current_header) {
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight");
assert_different_registers(oop, box, temp, displaced_header, current_header);
Label success, failure, object_has_monitor, not_recursive;
if (LockingMode == LM_LEGACY) {
// Find the lock address and load the displaced header from the stack.
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// If the displaced header is 0, we have a recursive unlock.
cmpdi(flag, displaced_header, 0);
beq(flag, success);
}
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
andi_(R0, current_header, markWord::monitor_value);
bne(CR0, object_has_monitor);
if (LockingMode == LM_MONITOR) {
// Set NE to indicate 'failure' -> take slow-path.
crandc(flag, Assembler::equal, flag, Assembler::equal);
b(failure);
} else {
assert(LockingMode == LM_LEGACY, "must be");
// Check if it is still a lightweight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/box,
/*exchange_value=*/displaced_header,
/*where=*/oop,
MacroAssembler::MemBarRel,
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&failure);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
b(success);
}
// Handle existing monitor.
bind(object_has_monitor);
STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
addi(current_header, current_header, -(int)markWord::monitor_value); // monitor
ld(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
addic_(displaced_header, displaced_header, -1);
blt(CR0, not_recursive); // Not recursive if negative after decrement.
// Recursive unlock
std(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
if (flag == CR0) { // Otherwise, flag is already EQ, here.
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Set CR0 EQ
}
b(success);
bind(not_recursive);
// Set owner to null.
// Release to satisfy the JMM
release();
li(temp, 0);
std(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
// We need a full fence after clearing owner to avoid stranding.
// StoreLoad achieves this.
membar(StoreLoad);
// Check if the entry_list is empty.
ld(temp, in_bytes(ObjectMonitor::entry_list_offset()), current_header);
cmpdi(flag, temp, 0);
beq(flag, success); // If so we are done.
// Check if there is a successor.
ld(temp, in_bytes(ObjectMonitor::succ_offset()), current_header);
cmpdi(flag, temp, 0);
// Invert equal bit
crnand(flag, Assembler::equal, flag, Assembler::equal);
beq(flag, success); // If there is a successor we are done.
// Save the monitor pointer in the current thread, so we can try
// to reacquire the lock in SharedRuntime::monitor_exit_helper().
std(current_header, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread);
b(failure); // flag == NE
// flag == EQ indicates success, decrement held monitor count if LM_LEGACY is enabled
// flag == NE indicates failure
bind(success);
if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(temp);
}
#ifdef ASSERT
// Check that unlocked label is reached with flag == EQ.
Label flag_correct;
beq(flag, flag_correct);
stop("compiler_fast_unlock_object: Flag != EQ");
#endif
bind(failure);
#ifdef ASSERT
// Check that slow_path label is reached with flag == NE.
bne(flag, flag_correct);
stop("compiler_fast_unlock_object: Flag != NE");
bind(flag_correct);
#endif
}
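The exit sequence above clears the owner and then needs a full StoreLoad fence before re-checking `entry_list` and `succ`, so a concurrently arriving waiter cannot be stranded. A sketch of that ordering with C++ atomics; the monitor layout is again an assumption, not the real `ObjectMonitor`:

```
#include <atomic>
#include <cstdint>

struct MiniMonitor {
  std::atomic<intptr_t> owner{0};
  intptr_t recursions = 0;
  std::atomic<void*> entry_list{nullptr};   // queued waiters
  std::atomic<void*> succ{nullptr};         // designated successor
};

// Sketch of the exit fast path: recursive exits only decrement; otherwise the
// owner is cleared and a full fence orders that store before the re-checks, so
// a thread that enqueued itself concurrently is guaranteed to be seen.
inline bool monitor_try_exit(MiniMonitor* m) {
  if (m->recursions > 0) {          // recursive unlock
    m->recursions--;
    return true;
  }
  m->owner.store(0, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad barrier
  if (m->entry_list.load() == nullptr) return true;     // nobody is waiting
  if (m->succ.load() != nullptr)       return true;     // a successor will take over
  return false;  // slow path: try to reacquire and wake a waiter
}
```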
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(obj, box, tmp1, tmp2, tmp3);
@ -4769,38 +4537,6 @@ void MacroAssembler::pop_cont_fastpath() {
bind(done);
}
// Note: Must preserve CR0 EQ (invariant).
void MacroAssembler::inc_held_monitor_count(Register tmp) {
assert(LockingMode == LM_LEGACY, "");
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
#ifdef ASSERT
Label ok;
cmpdi(CR0, tmp, 0);
bge_predict_taken(CR0, ok);
stop("held monitor count is negative at increment");
bind(ok);
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Restore CR0 EQ
#endif
addi(tmp, tmp, 1);
std(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
}
// Note: Must preserve CR0 EQ (invariant).
void MacroAssembler::dec_held_monitor_count(Register tmp) {
assert(LockingMode == LM_LEGACY, "");
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
#ifdef ASSERT
Label ok;
cmpdi(CR0, tmp, 0);
bgt_predict_taken(CR0, ok);
stop("held monitor count is <= 0 at decrement");
bind(ok);
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Restore CR0 EQ
#endif
addi(tmp, tmp, -1);
std(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
}
// Function to flip between unlocked and locked state (fast locking).
// Branches to failed if the state is not as expected with CR0 NE.
// Falls through upon success with CR0 EQ.
@ -4842,7 +4578,6 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
// - obj: the object to be locked
// - t1, t2: temporary register
void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(box, obj, t1, t2, R0);
Label push;
@ -4899,7 +4634,6 @@ void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, R
// - obj: the object to be unlocked
// - t1: temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1);
#ifdef ASSERT


@ -697,8 +697,6 @@ class MacroAssembler: public Assembler {
void push_cont_fastpath();
void pop_cont_fastpath();
void inc_held_monitor_count(Register tmp);
void dec_held_monitor_count(Register tmp);
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
void lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
void lightweight_unlock(Register obj, Register t1, Label& slow);
@ -715,12 +713,6 @@ class MacroAssembler: public Assembler {
enum { trampoline_stub_size = 6 * 4 };
address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);


@ -11573,40 +11573,8 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
// inlined locking and unlocking
instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2);
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
// If locking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_locking_Java for the case where crx is 'NE'.
%}
ins_pipe(pipe_class_compare);
%}
instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set crx (FastUnlock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
// If unlocking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_unlocking_Java for the case where crx is 'NE'.
%}
ins_pipe(pipe_class_compare);
%}
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
predicate(LockingMode == LM_LIGHTWEIGHT && !UseObjectMonitorTable);
predicate(!UseObjectMonitorTable);
match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2);
@ -11622,7 +11590,7 @@ instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRe
%}
instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, flagsRegCR1 cr1) %{
predicate(LockingMode == LM_LIGHTWEIGHT && UseObjectMonitorTable);
predicate(UseObjectMonitorTable);
match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr1);
@ -11638,7 +11606,6 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
%}
instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
predicate(LockingMode == LM_LIGHTWEIGHT);
match(Set crx (FastUnlock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);


@ -2446,14 +2446,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for locking.
if (LockingMode == LM_LIGHTWEIGHT) {
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
} else {
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
__ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
}
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
__ beq(CR0, locked);
// None of the above fast optimizations worked so we have to get into the
@ -2620,7 +2615,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ stw(R0, thread_(thread_state));
// Check preemption for Object.wait()
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
if (method->is_object_wait0()) {
Label not_preempted;
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
__ cmpdi(CR0, R0, 0);
@ -2672,11 +2667,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for unlocking.
if (LockingMode == LM_LIGHTWEIGHT) {
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
} else {
__ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
}
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
__ beq(CR0, done);
// Save and restore any potential method result value around the unlocking operation.
@ -2717,7 +2708,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
// Last java frame won't be set if we're resuming after preemption
bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0();
bool maybe_preempted = method->is_object_wait0();
__ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);
// Unbox oop result, e.g. JNIHandles::resolve value.


@ -1362,7 +1362,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// convenient and the slow signature handler can use this same frame
// anchor.
bool support_vthread_preemption = Continuations::enabled() && LockingMode != LM_LEGACY;
bool support_vthread_preemption = Continuations::enabled();
// We have a TOP_IJAVA_FRAME here, which belongs to us.
Label last_java_pc;


@ -194,6 +194,9 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
}
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
}
//////// Thaw
// Fast path


@ -172,9 +172,9 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
// expand_call should be passed true.
if (expand_call) {
assert(pre_val != c_rarg1, "smashed arg");
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
}
__ pop_reg(saved, sp);
@ -702,7 +702,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
__ bind(runtime);
__ push_call_clobbered_registers();
__ load_parameter(0, pre_val);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
__ pop_call_clobbered_registers();
__ bind(done);


@ -110,6 +110,7 @@ source %{
if (vlen < 4) {
return false;
}
break;
case Op_VectorCastHF2F:
case Op_VectorCastF2HF:
case Op_AddVHF:


@ -60,6 +60,10 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
Unimplemented();
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
Unimplemented();
}
inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
Unimplemented();
}


@ -164,15 +164,16 @@ class StubGenerator: public StubCodeGenerator {
// Save non-volatile registers to ABI of caller frame.
BLOCK_COMMENT("save registers, push frame {");
__ z_stmg(Z_R6, Z_R14, 16, Z_SP);
__ z_std(Z_F8, 96, Z_SP);
__ z_std(Z_F9, 104, Z_SP);
__ z_std(Z_F10, 112, Z_SP);
__ z_std(Z_F11, 120, Z_SP);
__ z_std(Z_F12, 128, Z_SP);
__ z_std(Z_F13, 136, Z_SP);
__ z_std(Z_F14, 144, Z_SP);
__ z_std(Z_F15, 152, Z_SP);
__ save_return_pc();
__ z_stmg(Z_R6, Z_R13, 16, Z_SP);
__ z_std(Z_F8, 80, Z_SP);
__ z_std(Z_F9, 88, Z_SP);
__ z_std(Z_F10, 96, Z_SP);
__ z_std(Z_F11, 104, Z_SP);
__ z_std(Z_F12, 112, Z_SP);
__ z_std(Z_F13, 120, Z_SP);
__ z_std(Z_F14, 128, Z_SP);
__ z_std(Z_F15, 136, Z_SP);
//
// Push ENTRY_FRAME including arguments:
@ -337,15 +338,16 @@ class StubGenerator: public StubCodeGenerator {
__ z_lg(r_arg_result_type, result_type_offset, r_entryframe_fp);
// Restore non-volatiles.
__ z_lmg(Z_R6, Z_R14, 16, Z_SP);
__ z_ld(Z_F8, 96, Z_SP);
__ z_ld(Z_F9, 104, Z_SP);
__ z_ld(Z_F10, 112, Z_SP);
__ z_ld(Z_F11, 120, Z_SP);
__ z_ld(Z_F12, 128, Z_SP);
__ z_ld(Z_F13, 136, Z_SP);
__ z_ld(Z_F14, 144, Z_SP);
__ z_ld(Z_F15, 152, Z_SP);
__ restore_return_pc();
__ z_lmg(Z_R6, Z_R13, 16, Z_SP);
__ z_ld(Z_F8, 80, Z_SP);
__ z_ld(Z_F9, 88, Z_SP);
__ z_ld(Z_F10, 96, Z_SP);
__ z_ld(Z_F11, 104, Z_SP);
__ z_ld(Z_F12, 112, Z_SP);
__ z_ld(Z_F13, 120, Z_SP);
__ z_ld(Z_F14, 128, Z_SP);
__ z_ld(Z_F15, 136, Z_SP);
BLOCK_COMMENT("} restore");
//


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,9 +98,12 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
*hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
return hf;
} else {
// We need to re-read fp out of the frame because it may be an oop and we might have
// had a safepoint in finalize_freeze, after constructing f.
fp = *(intptr_t**)(f.sp() - frame::sender_sp_offset);
// For a compiled frame we need to re-read fp out of the frame because it may be an
// oop and we might have had a safepoint in finalize_freeze, after constructing f.
// For stub/native frames the value is not used while frozen, and will be constructed again
// when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
// help with debugging, particularly when inspecting frames and identifying invalid accesses.
fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
int fsize = FKind::size(f);
sp = caller.unextended_sp() - fsize;
@ -183,6 +186,11 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
}
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
intptr_t* fp_addr = sp - frame::sender_sp_offset;
*fp_addr = badAddressVal;
}
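// The poison store above exists purely for debuggability: the slot is unused
// while the frame is frozen and is rebuilt on thaw, so giving it a clearly
// invalid value makes stray reads obvious. A standalone sketch of the idea
// (kBadAddress and kSenderSpOffset are illustrative stand-ins, not the real
// badAddressVal / frame::sender_sp_offset constants):
#include <cstdint>
static const intptr_t kBadAddress = (intptr_t)0xBAADBABEBAADBABEULL;
static const int kSenderSpOffset = 2;  // assumed saved-fp slot position for the example
inline void poison_unused_fp(intptr_t* sp) {
  *(sp - kSenderSpOffset) = kBadAddress;  // any accidental dereference now fails loudly
}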
//////// Thaw
// Fast path


@ -276,9 +276,9 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
__ mov(c_rarg1, thread);
}
// Already moved pre_val into c_rarg0 above
__ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), 2);
__ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), c_rarg0, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
}
// save the live input values
@ -946,7 +946,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
// load the pre-value
__ load_parameter(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), rcx, thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);
__ restore_live_registers(true);


@ -60,6 +60,10 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
Unimplemented();
}
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
Unimplemented();
}
inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
Unimplemented();
}


@ -748,17 +748,29 @@ void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
bool ArchiveBuilder::has_been_archived(address src_addr) const {
SourceObjInfo* p = _src_obj_table.get(src_addr);
return (p != nullptr);
}
bool ArchiveBuilder::has_been_buffered(address src_addr) const {
if (RegeneratedClasses::has_been_regenerated(src_addr) ||
_src_obj_table.get(src_addr) == nullptr ||
get_buffered_addr(src_addr) == nullptr) {
if (p == nullptr) {
// This object has never been seen by ArchiveBuilder
return false;
} else {
return true;
}
if (p->buffered_addr() == nullptr) {
// ArchiveBuilder has seen this object, but decided not to archive it, so
// any reference to this object will be modified to nullptr inside the buffer.
assert(p->follow_mode() == set_to_null, "must be");
return false;
}
DEBUG_ONLY({
// This is a class/method that belongs to one of the "original" classes that
// have been regenerated by lambdaFormInvokers.cpp. We must have archived
// the "regenerated" version of it.
if (RegeneratedClasses::has_been_regenerated(src_addr)) {
address regen_obj = RegeneratedClasses::get_regenerated_object(src_addr);
precond(regen_obj != nullptr && regen_obj != src_addr);
assert(has_been_archived(regen_obj), "must be");
assert(get_buffered_addr(src_addr) == get_buffered_addr(regen_obj), "must be");
}});
return true;
}
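// The branches above amount to a three-way classification of a source address:
// never seen, seen but deliberately dropped (references become nullptr in the
// buffer), or actually archived. A compact sketch of that decision, with a
// hypothetical hash map standing in for _src_obj_table:
#include <unordered_map>
enum class ArchiveState { NotSeen, SetToNull, Archived };
struct ToyObjInfo { void* buffered_addr; };
inline ArchiveState classify(const std::unordered_map<const void*, ToyObjInfo>& table,
                             const void* src) {
  auto it = table.find(src);
  if (it == table.end())                   return ArchiveState::NotSeen;    // never seen
  if (it->second.buffered_addr == nullptr) return ArchiveState::SetToNull;  // seen, not archived
  return ArchiveState::Archived;           // has a buffered copy
}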
address ArchiveBuilder::get_buffered_addr(address src_addr) const {


@ -180,6 +180,7 @@ private:
return _buffered_addr;
}
MetaspaceObj::Type msotype() const { return _msotype; }
FollowMode follow_mode() const { return _follow_mode; }
};
class SourceObjList {
@ -443,10 +444,8 @@ public:
}
bool has_been_archived(address src_addr) const;
bool has_been_buffered(address src_addr) const;
template <typename T> bool has_been_buffered(T src_addr) const {
return has_been_buffered((address)src_addr);
template <typename T> bool has_been_archived(T src_addr) const {
return has_been_archived((address)src_addr);
}
address get_buffered_addr(address src_addr) const;


@ -764,7 +764,7 @@ void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
}
guarantee(ArchiveBuilder::current()->has_been_buffered((address)native_ptr),
guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
"Metadata %p should have been archived", native_ptr);
address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);


@ -383,8 +383,7 @@ void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
}
bool ArchiveUtils::has_aot_initialized_mirror(InstanceKlass* src_ik) {
if (SystemDictionaryShared::is_excluded_class(src_ik)) {
assert(!ArchiveBuilder::current()->has_been_buffered(src_ik), "sanity");
if (!ArchiveBuilder::current()->has_been_archived(src_ik)) {
return false;
}
return ArchiveBuilder::current()->get_buffered_addr(src_ik)->has_aot_initialized_mirror();


@ -1369,7 +1369,7 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, G1BarrierSetRuntime::write_ref_field_pre_entry);
#endif
#if INCLUDE_SHENANDOAHGC
SET_ADDRESS(_extrs, ShenandoahRuntime::write_ref_field_pre);
SET_ADDRESS(_extrs, ShenandoahRuntime::write_barrier_pre);
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom);
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif


@ -480,6 +480,7 @@ void CompileQueue::purge_stale_tasks() {
MutexUnlocker ul(MethodCompileQueue_lock);
for (CompileTask* task = head; task != nullptr; ) {
CompileTask* next_task = task->next();
task->set_next(nullptr);
CompileTaskWrapper ctw(task); // Frees the task
task->set_failure_reason("stale task");
task = next_task;


@ -74,6 +74,7 @@
#include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/g1YoungCollector.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/fullGCForwarding.hpp"
@ -799,6 +800,7 @@ void G1CollectedHeap::prepare_for_mutator_after_full_collection(size_t allocatio
// Rebuild the code root lists for each region
rebuild_code_roots();
finish_codecache_marking_cycle();
start_new_collection_set();
_allocator->init_mutator_alloc_regions();
@ -1487,6 +1489,8 @@ jint G1CollectedHeap::initialize() {
_collection_set.initialize(max_num_regions());
start_new_collection_set();
allocation_failure_injector()->reset();
CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
@ -3077,6 +3081,8 @@ void G1CollectedHeap::register_nmethod(nmethod* nm) {
guarantee(nm != nullptr, "sanity");
RegisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
bs_nm->disarm(nm);
}
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
@ -3157,5 +3163,5 @@ void G1CollectedHeap::finish_codecache_marking_cycle() {
void G1CollectedHeap::prepare_group_cardsets_for_scan() {
young_regions_cardset()->reset_table_scanner_for_groups();
collection_set()->prepare_groups_for_scan();
collection_set()->prepare_for_scan();
}


@ -36,7 +36,15 @@
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/quickSort.hpp"
uint G1CollectionSet::groups_cur_length() const {
assert(_inc_build_state == CSetBuildType::Inactive, "must be");
return _groups.length();
}
uint G1CollectionSet::groups_increment_length() const {
return groups_cur_length() - _groups_inc_part_start;
}
G1CollectorState* G1CollectionSet::collector_state() const {
return _g1h->collector_state();
@ -50,22 +58,21 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
_g1h(g1h),
_policy(policy),
_candidates(),
_collection_set_regions(nullptr),
_collection_set_cur_length(0),
_collection_set_max_length(0),
_collection_set_groups(),
_selected_groups_cur_length(0),
_selected_groups_inc_part_start(0),
_regions(nullptr),
_regions_max_length(0),
_regions_cur_length(0),
_groups(),
_eden_region_length(0),
_survivor_region_length(0),
_initial_old_region_length(0),
_optional_groups(),
_inc_build_state(Inactive),
_inc_part_start(0) {
DEBUG_ONLY(_inc_build_state(CSetBuildType::Inactive) COMMA)
_regions_inc_part_start(0),
_groups_inc_part_start(0) {
}
G1CollectionSet::~G1CollectionSet() {
FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
FREE_C_HEAP_ARRAY(uint, _regions);
abandon_all_candidates();
}
@ -76,8 +83,8 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
assert((size_t)young_region_length() == _collection_set_cur_length,
"Young region length %u should match collection set length %u", young_region_length(), _collection_set_cur_length);
assert((size_t)young_region_length() == _regions_cur_length,
"Young region length %u should match collection set length %u", young_region_length(), _regions_cur_length);
_initial_old_region_length = 0;
assert(_optional_groups.length() == 0, "Should not have any optional groups yet");
@ -85,9 +92,9 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
}
void G1CollectionSet::initialize(uint max_region_length) {
guarantee(_collection_set_regions == nullptr, "Must only initialize once.");
_collection_set_max_length = max_region_length;
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
guarantee(_regions == nullptr, "Must only initialize once.");
_regions_max_length = max_region_length;
_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
_candidates.initialize(max_region_length);
}
@ -97,14 +104,14 @@ void G1CollectionSet::abandon_all_candidates() {
_initial_old_region_length = 0;
}
void G1CollectionSet::prepare_groups_for_scan () {
collection_set_groups()->prepare_for_scan();
void G1CollectionSet::prepare_for_scan () {
groups()->prepare_for_scan();
}
void G1CollectionSet::add_old_region(G1HeapRegion* hr) {
assert_at_safepoint_on_vm_thread();
assert(_inc_build_state == Active,
assert(_inc_build_state == CSetBuildType::Active,
"Precondition, actively building cset or adding optional later on");
assert(hr->is_old(), "the region should be old");
@ -113,37 +120,46 @@ void G1CollectionSet::add_old_region(G1HeapRegion* hr) {
assert(!hr->in_collection_set(), "should not already be in the collection set");
_g1h->register_old_region_with_region_attr(hr);
assert(_collection_set_cur_length < _collection_set_max_length, "Collection set now larger than maximum size.");
_collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
assert(_regions_cur_length < _regions_max_length, "Collection set now larger than maximum size.");
_regions[_regions_cur_length++] = hr->hrm_index();
_initial_old_region_length++;
_g1h->old_set_remove(hr);
}
void G1CollectionSet::start_incremental_building() {
assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(_inc_build_state == Inactive, "Precondition");
assert(_regions_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(groups_cur_length() == 0, "Collection set groups must be empty before starting a new collection set.");
assert(_optional_groups.length() == 0, "Collection set optional groups must be empty before starting a new collection set.");
update_incremental_marker();
continue_incremental_building();
}
void G1CollectionSet::finalize_incremental_building() {
assert(_inc_build_state == Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
void G1CollectionSet::continue_incremental_building() {
assert(_inc_build_state == CSetBuildType::Inactive, "Precondition");
_regions_inc_part_start = _regions_cur_length;
_groups_inc_part_start = groups_cur_length();
DEBUG_ONLY(_inc_build_state = CSetBuildType::Active;)
}
void G1CollectionSet::stop_incremental_building() {
DEBUG_ONLY(_inc_build_state = CSetBuildType::Inactive;)
}
void G1CollectionSet::clear() {
assert_at_safepoint_on_vm_thread();
_collection_set_cur_length = 0;
_collection_set_groups.clear();
_regions_cur_length = 0;
_groups.clear();
}
void G1CollectionSet::iterate(G1HeapRegionClosure* cl) const {
size_t len = _collection_set_cur_length;
size_t len = _regions_cur_length;
OrderAccess::loadload();
for (uint i = 0; i < len; i++) {
G1HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
G1HeapRegion* r = _g1h->region_at(_regions[i]);
bool result = cl->do_heap_region(r);
if (result) {
cl->set_incomplete();
@ -170,7 +186,7 @@ void G1CollectionSet::iterate_optional(G1HeapRegionClosure* cl) const {
void G1CollectionSet::iterate_incremental_part_from(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) const {
iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id);
iterate_part_from(cl, hr_claimer, _regions_inc_part_start, regions_cur_length(), worker_id);
}
void G1CollectionSet::iterate_part_from(G1HeapRegionClosure* cl,
@ -180,29 +196,29 @@ void G1CollectionSet::iterate_part_from(G1HeapRegionClosure* cl,
uint worker_id) const {
_g1h->par_iterate_regions_array(cl,
hr_claimer,
&_collection_set_regions[offset],
&_regions[offset],
length,
worker_id);
}
void G1CollectionSet::add_young_region_common(G1HeapRegion* hr) {
assert(hr->is_young(), "invariant");
assert(_inc_build_state == Active, "Precondition");
assert(_inc_build_state == CSetBuildType::Active, "Precondition");
assert(!hr->in_collection_set(), "invariant");
_g1h->register_young_region_with_region_attr(hr);
// We use UINT_MAX as "invalid" marker in verification.
assert(_collection_set_cur_length < (UINT_MAX - 1),
"Collection set is too large with %u entries", _collection_set_cur_length);
hr->set_young_index_in_cset(_collection_set_cur_length + 1);
assert(_regions_cur_length < (UINT_MAX - 1),
"Collection set is too large with %u entries", _regions_cur_length);
hr->set_young_index_in_cset(_regions_cur_length + 1);
assert(_collection_set_cur_length < _collection_set_max_length, "Collection set larger than maximum allowed.");
_collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
assert(_regions_cur_length < _regions_max_length, "Collection set larger than maximum allowed.");
_regions[_regions_cur_length] = hr->hrm_index();
// Concurrent readers must observe the store of the value in the array before an
// update to the length field.
OrderAccess::storestore();
_collection_set_cur_length++;
_regions_cur_length++;
}
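// The storestore above, paired with the loadload in iterate(), is the standard
// single-writer publication pattern: store the element, then publish the new
// length; readers load the length first and only then read elements below it.
// A self-contained sketch using C++11 atomics in place of OrderAccess
// (PublishedArray and its fixed capacity are invented for the example):
#include <atomic>
#include <cstddef>
struct PublishedArray {
  unsigned _data[1024];
  std::atomic<size_t> _length{0};
  void append(unsigned value) {              // single writer only
    size_t len = _length.load(std::memory_order_relaxed);
    if (len >= 1024) return;                 // capacity handling elided in the sketch
    _data[len] = value;                      // store the element first...
    _length.store(len + 1, std::memory_order_release);  // ...then publish (storestore)
  }
  size_t snapshot(unsigned* out) const {     // any number of concurrent readers
    size_t len = _length.load(std::memory_order_acquire);  // loadload before element reads
    for (size_t i = 0; i < len; i++) out[i] = _data[i];
    return len;
  }
};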
void G1CollectionSet::add_survivor_regions(G1HeapRegion* hr) {
@ -284,9 +300,10 @@ void G1CollectionSet::print(outputStream* st) {
// pinned by JNI) to allow faster future evacuation. We already "paid" for this work
// when sizing the young generation.
double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
Ticks start_time = Ticks::now();
assert(_inc_build_state == CSetBuildType::Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
finalize_incremental_building();
Ticks start_time = Ticks::now();
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
@ -326,10 +343,6 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
return remaining_time_ms;
}
static int compare_region_idx(const uint a, const uint b) {
return static_cast<int>(a-b);
}
// The current mechanism for evacuating pinned old regions is as below:
// * pinned regions in the marking collection set candidate list (available during mixed gc) are evacuated like
// pinned young regions to avoid the complexity of dealing with pinned regions that are part of a
@ -343,9 +356,6 @@ static int compare_region_idx(const uint a, const uint b) {
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
double non_young_start_time_sec = os::elapsedTime();
_selected_groups_cur_length = 0;
_selected_groups_inc_part_start = 0;
if (!candidates()->is_empty()) {
candidates()->verify();
@ -363,13 +373,8 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
log_debug(gc, ergo, cset)("No candidates to reclaim.");
}
_selected_groups_cur_length = collection_set_groups()->length();
stop_incremental_building();
double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx);
}
static void print_finish_message(const char* reason, bool from_marking) {
@ -594,7 +599,6 @@ double G1CollectionSet::select_candidates_from_optional_groups(double time_remai
assert(_optional_groups.num_regions() > 0,
"Should only be called when there are optional regions");
uint num_groups_selected = 0;
double total_prediction_ms = 0.0;
G1CSetCandidateGroupList selected;
for (G1CSetCandidateGroup* group : _optional_groups) {
@ -610,22 +614,22 @@ double G1CollectionSet::select_candidates_from_optional_groups(double time_remai
time_remaining_ms -= predicted_time_ms;
num_regions_selected += group->length();
num_groups_selected++;
add_group_to_collection_set(group);
selected.append(group);
}
log_debug(gc, ergo, cset) ("Completed with groups, selected %u", num_regions_selected);
log_debug(gc, ergo, cset)("Completed with groups, selected %u region in %u groups",
num_regions_selected, selected.length());
// Remove selected groups from candidate list.
if (num_groups_selected > 0) {
if (selected.length() > 0) {
_optional_groups.remove(&selected);
candidates()->remove(&selected);
}
return total_prediction_ms;
}
uint G1CollectionSet::select_optional_collection_set_regions(double time_remaining_ms) {
uint G1CollectionSet::select_optional_groups(double time_remaining_ms) {
uint optional_regions_count = num_optional_regions();
assert(optional_regions_count > 0,
"Should only be called when there are optional regions");
@ -660,7 +664,7 @@ void G1CollectionSet::add_group_to_collection_set(G1CSetCandidateGroup* gr) {
assert(r->rem_set()->is_complete(), "must be");
add_region_to_collection_set(r);
}
_collection_set_groups.append(gr);
_groups.append(gr);
}
void G1CollectionSet::add_region_to_collection_set(G1HeapRegion* r) {
@ -670,16 +674,20 @@ void G1CollectionSet::add_region_to_collection_set(G1HeapRegion* r) {
}
void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
assert(_regions_inc_part_start == 0, "must be");
assert(_groups_inc_part_start == 0, "must be");
double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
finalize_old_part(time_remaining_ms);
stop_incremental_building();
}
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
update_incremental_marker();
continue_incremental_building();
uint num_regions_selected = select_optional_collection_set_regions(remaining_pause_time);
uint num_regions_selected = select_optional_groups(remaining_pause_time);
_selected_groups_cur_length = collection_set_groups()->length();
stop_incremental_building();
_g1h->verify_region_attr_remset_is_tracked();
@ -741,7 +749,7 @@ public:
void G1CollectionSet::verify_young_cset_indices() const {
assert_at_safepoint_on_vm_thread();
G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
G1VerifyYoungCSetIndicesClosure cl(_regions_cur_length);
iterate(&cl);
}
#endif


@ -41,19 +41,19 @@ class G1HeapRegionClosure;
// The collection set.
//
// The set of regions that are evacuated during an evacuation pause.
// The set of regions and candidate groups that were evacuated during an
// evacuation pause.
//
// At the end of a collection, before freeing the collection set, this set
// contains all regions that were evacuated during this collection:
// At the end of a collection, before freeing it, this set contains all regions
// and collection set groups that were evacuated during this collection:
//
// - survivor regions from the last collection (if any)
// - eden regions allocated by the mutator
// - old gen regions evacuated during mixed gc
//
// This set is built incrementally at mutator time as regions are retired, and
// if this had been a mixed gc, some additional (during gc) incrementally added
// old regions from the collection set candidates built during the concurrent
// cycle.
// This set is initially built at mutator time as regions are retired. If the
// collection is a mixed gc, it contains some additional (during the pause)
// incrementally added old regions from the collection set candidates.
//
// A more detailed overview of how the collection set changes over time follows:
//
@ -129,6 +129,7 @@ class G1HeapRegionClosure;
// || ... after step b6)
// |SSS| ... after step 7), with three survivor regions
//
// Candidate groups are kept in sync with the contents of the collection set regions.
class G1CollectionSet {
G1CollectedHeap* _g1h;
G1Policy* _policy;
@ -137,48 +138,54 @@ class G1CollectionSet {
G1CollectionSetCandidates _candidates;
// The actual collection set as a set of region indices.
// All entries in _collection_set_regions below _collection_set_cur_length are
// assumed to be part of the collection set.
//
// All regions in _regions below _regions_cur_length are assumed to be part of the
// collection set.
// We assume that at any time there is at most only one writer and (one or more)
// concurrent readers. This means we are good with using storestore and loadload
// barriers on the writer and reader respectively only.
uint* _collection_set_regions;
volatile uint _collection_set_cur_length;
uint _collection_set_max_length;
// concurrent readers. This means that storestore and loadload barriers on the
// writer and reader, respectively, are sufficient for synchronization.
//
// This corresponds to the regions referenced by the candidate groups further below.
uint* _regions;
uint _regions_max_length;
volatile uint _regions_cur_length;
// Old gen groups selected for evacuation.
G1CSetCandidateGroupList _collection_set_groups;
G1CSetCandidateGroupList _groups;
// Groups are added to the collection set in increments when performing optional evacuations.
// We use the value below to track these increments.
uint _selected_groups_cur_length;
uint _selected_groups_inc_part_start;
uint groups_cur_length() const;
uint _eden_region_length;
uint _survivor_region_length;
uint _initial_old_region_length;
// When doing mixed collections we can add old regions to the collection set, which
// will be collected only if there is enough time. We call these optional (old) regions.
// will be collected only if there is enough time. We call these optional (old)
// groups. Regions are reachable via this list as well.
G1CSetCandidateGroupList _optional_groups;
enum CSetBuildType {
#ifdef ASSERT
enum class CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_build_state;
size_t _inc_part_start;
#endif
// Index into the _regions array indicating the start of the current collection set increment.
size_t _regions_inc_part_start;
// Index into the _groups list indicating the start of the current collection set increment.
uint _groups_inc_part_start;
G1CollectorState* collector_state() const;
G1GCPhaseTimes* phase_times();
void verify_young_cset_indices() const NOT_DEBUG_RETURN;
// Update the incremental collection set information when adding a region.
void add_young_region_common(G1HeapRegion* hr);
// Add the given old region to the head of the current collection set.
// Add the given old region to the current collection set.
void add_old_region(G1HeapRegion* hr);
void prepare_optional_group(G1CSetCandidateGroup* gr, uint cur_index);
@ -191,18 +198,15 @@ class G1CollectionSet {
void select_candidates_from_retained(double time_remaining_ms);
// Select regions for evacuation from the optional candidates given the remaining time
// and return the number of actually selected regions.
uint select_optional_collection_set_regions(double time_remaining_ms);
double select_candidates_from_optional_groups(double time_remaining_ms, uint& num_regions_selected);
// Select groups for evacuation from the optional candidates given the remaining time
// and return the number of actually selected regions.
uint select_optional_groups(double time_remaining_ms);
double select_candidates_from_optional_groups(double time_remaining_ms, uint& num_groups_selected);
// Finalize the young part of the initial collection set. Relabel survivor regions
// as Eden and calculate a prediction on how long the evacuation of all young regions
// will take.
// will take. Returns the time remaining from the given target pause time.
double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
// Perform any final calculations on the incremental collection set fields before we
// can use them.
void finalize_incremental_building();
// Select the regions comprising the initial and optional collection set from marking
// and retained collection set candidates.
@ -223,27 +227,29 @@ public:
// Initializes the collection set giving the maximum possible length of the collection set.
void initialize(uint max_region_length);
// Drop all collection set candidates (only the candidates).
void abandon_all_candidates();
G1CollectionSetCandidates* candidates() { return &_candidates; }
const G1CollectionSetCandidates* candidates() const { return &_candidates; }
G1CSetCandidateGroupList* collection_set_groups() { return &_collection_set_groups; }
const G1CSetCandidateGroupList* collection_set_groups() const { return &_collection_set_groups; }
G1CSetCandidateGroupList* groups() { return &_groups; }
const G1CSetCandidateGroupList* groups() const { return &_groups; }
void prepare_groups_for_scan();
void prepare_for_scan();
void init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length);
uint region_length() const { return young_region_length() +
initial_old_region_length(); }
// Total length of the initial collection set in regions.
uint initial_region_length() const { return young_region_length() +
initial_old_region_length(); }
uint young_region_length() const { return eden_region_length() +
survivor_region_length(); }
uint eden_region_length() const { return _eden_region_length; }
uint eden_region_length() const { return _eden_region_length; }
uint survivor_region_length() const { return _survivor_region_length; }
uint initial_old_region_length() const { return _initial_old_region_length; }
uint initial_old_region_length() const { return _initial_old_region_length; }
uint num_optional_regions() const { return _optional_groups.num_regions(); }
bool only_contains_young_regions() const { return (initial_old_region_length() + num_optional_regions()) == 0; }
@ -258,28 +264,24 @@ public:
// Initialize incremental collection set info.
void start_incremental_building();
// Start a new collection set increment.
void update_incremental_marker() {
_inc_build_state = Active;
_inc_part_start = _collection_set_cur_length;
_selected_groups_inc_part_start = _selected_groups_cur_length;
}
// Start a new collection set increment, continuing the incremental building.
void continue_incremental_building();
// Stop adding regions to the current collection set increment.
void stop_incremental_building() { _inc_build_state = Inactive; }
void stop_incremental_building();
// Iterate over the current collection set increment applying the given G1HeapRegionClosure
// from a starting position determined by the given worker id.
void iterate_incremental_part_from(G1HeapRegionClosure* cl, G1HeapRegionClaimer* hr_claimer, uint worker_id) const;
// Returns the length of the current increment in number of regions.
size_t increment_length() const { return _collection_set_cur_length - _inc_part_start; }
size_t regions_cur_length() const { return _regions_cur_length - _regions_inc_part_start; }
// Returns the length of the whole current collection set in number of regions
size_t cur_length() const { return _collection_set_cur_length; }
size_t cur_length() const { return _regions_cur_length; }
uint collection_groups_increment_length() const { return _selected_groups_cur_length - _selected_groups_inc_part_start; }
uint groups_increment_length() const;
// Iterate over the entire collection set (all increments calculated so far), applying
// the given G1HeapRegionClosure on all of them.
// the given G1HeapRegionClosure on all of the regions.
void iterate(G1HeapRegionClosure* cl) const;
void par_iterate(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
@ -287,10 +289,11 @@ public:
void iterate_optional(G1HeapRegionClosure* cl) const;
// Finalize the initial collection set consisting of all young regions potentially a
// Finalize the initial collection set consisting of all young regions and potentially a
// few old gen regions.
void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
// Finalize the next collection set from the set of available optional old gen regions.
// Returns whether there still were some optional regions.
bool finalize_optional_for_evacuation(double remaining_pause_time);
// Abandon (clean up) optional collection set regions that were not evacuated in this
// pause.


@ -31,8 +31,8 @@
template <class CardOrRangeVisitor>
inline void G1CollectionSet::merge_cardsets_for_collection_groups(CardOrRangeVisitor& cl, uint worker_id, uint num_workers) {
uint length = collection_groups_increment_length();
uint offset = _selected_groups_inc_part_start;
uint length = groups_increment_length();
uint offset = _groups_inc_part_start;
if (length == 0) {
return;
}
@ -41,7 +41,7 @@ inline void G1CollectionSet::merge_cardsets_for_collection_groups(CardOrRangeVis
uint cur_pos = start_pos;
uint count = 0;
do {
G1HeapRegionRemSet::iterate_for_merge(collection_set_groups()->at(offset + cur_pos)->card_set(), cl);
G1HeapRegionRemSet::iterate_for_merge(groups()->at(offset + cur_pos)->card_set(), cl);
cur_pos++;
count++;
if (cur_pos == length) {


@ -44,12 +44,7 @@ G1CSetCandidateGroup::G1CSetCandidateGroup() :
void G1CSetCandidateGroup::add(G1HeapRegion* hr) {
G1CollectionSetCandidateInfo c(hr);
add(c);
}
void G1CSetCandidateGroup::add(G1CollectionSetCandidateInfo& hr_info) {
G1HeapRegion* hr = hr_info._r;
_candidates.append(hr_info);
_candidates.append(c);
hr->install_cset_group(this);
}
@ -63,10 +58,9 @@ void G1CSetCandidateGroup::calculate_efficiency() {
_gc_efficiency = _reclaimable_bytes / predict_group_total_time_ms();
}
size_t G1CSetCandidateGroup::liveness() const {
double G1CSetCandidateGroup::liveness_percent() const {
size_t capacity = length() * G1HeapRegion::GrainBytes;
return (size_t) ceil(((capacity - _reclaimable_bytes) * 100.0) / capacity);
return ((capacity - _reclaimable_bytes) * 100.0) / capacity;
}
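// Quick arithmetic check of liveness_percent() above, assuming a 32 MB region
// grain size (the real GrainBytes is chosen ergonomically): a 4-region group
// with 96 MB reclaimable has capacity 4 * 32 MB = 128 MB, so liveness is
// (128 MB - 96 MB) * 100.0 / 128 MB = 25.0 percent.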
void G1CSetCandidateGroup::clear(bool uninstall_group_cardset) {
@ -134,31 +128,6 @@ int G1CSetCandidateGroup::compare_gc_efficiency(G1CSetCandidateGroup** gr1, G1CS
}
}
int G1CollectionSetCandidateInfo::compare_region_gc_efficiency(G1CollectionSetCandidateInfo* ci1, G1CollectionSetCandidateInfo* ci2) {
// Make sure that null entries are moved to the end.
if (ci1->_r == nullptr) {
if (ci2->_r == nullptr) {
return 0;
} else {
return 1;
}
} else if (ci2->_r == nullptr) {
return -1;
}
G1Policy* p = G1CollectedHeap::heap()->policy();
double gc_efficiency1 = p->predict_gc_efficiency(ci1->_r);
double gc_efficiency2 = p->predict_gc_efficiency(ci2->_r);
if (gc_efficiency1 > gc_efficiency2) {
return -1;
} else if (gc_efficiency1 < gc_efficiency2) {
return 1;
} else {
return 0;
}
}
G1CSetCandidateGroupList::G1CSetCandidateGroupList() : _groups(8, mtGC), _num_regions(0) { }
void G1CSetCandidateGroupList::append(G1CSetCandidateGroup* group) {
@ -280,9 +249,9 @@ void G1CollectionSetCandidates::sort_marking_by_efficiency() {
_from_marking_groups.verify();
}
void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionSetCandidateInfo* candidate_infos,
uint num_infos) {
if (num_infos == 0) {
void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candidates,
uint num_candidates) {
if (num_candidates == 0) {
log_debug(gc, ergo, cset) ("No regions selected from marking.");
return;
}
@ -295,7 +264,7 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionSetCandi
// the G1MixedGCCountTarget. For the first collection in a Mixed GC cycle, we can add all regions
// required to meet this threshold to the same remset group. We are certain these will be collected in
// the same MixedGC.
uint group_limit = p->calc_min_old_cset_length(num_infos);
uint group_limit = p->calc_min_old_cset_length(num_candidates);
uint num_added_to_group = 0;
@ -304,8 +273,8 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionSetCandi
current = new G1CSetCandidateGroup();
for (uint i = 0; i < num_infos; i++) {
G1HeapRegion* r = candidate_infos[i]._r;
for (uint i = 0; i < num_candidates; i++) {
G1HeapRegion* r = candidates[i];
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Marking;
@ -319,16 +288,16 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionSetCandi
current = new G1CSetCandidateGroup();
num_added_to_group = 0;
}
current->add(candidate_infos[i]);
current->add(r);
num_added_to_group++;
}
_from_marking_groups.append(current);
assert(_from_marking_groups.num_regions() == num_infos, "Must be!");
assert(_from_marking_groups.num_regions() == num_candidates, "Must be!");
log_debug(gc, ergo, cset) ("Finished creating %u collection groups from %u regions", _from_marking_groups.length(), num_infos);
_last_marking_candidates_length = num_infos;
log_debug(gc, ergo, cset) ("Finished creating %u collection groups from %u regions", _from_marking_groups.length(), num_candidates);
_last_marking_candidates_length = num_candidates;
verify();
}


@ -48,8 +48,6 @@ struct G1CollectionSetCandidateInfo {
++_num_unreclaimed;
return _num_unreclaimed < G1NumCollectionsKeepPinned;
}
static int compare_region_gc_efficiency(G1CollectionSetCandidateInfo* ci1, G1CollectionSetCandidateInfo* ci2);
};
using G1CSetCandidateGroupIterator = GrowableArrayIterator<G1CollectionSetCandidateInfo>;
@ -91,7 +89,6 @@ public:
}
void add(G1HeapRegion* hr);
void add(G1CollectionSetCandidateInfo& hr_info);
uint length() const { return (uint)_candidates.length(); }
@ -102,7 +99,7 @@ public:
void calculate_efficiency();
size_t liveness() const;
double liveness_percent() const;
// Comparison function to order regions in decreasing GC efficiency order. This
// will cause regions with a lot of live objects and large remembered sets to end
// up at the end of the list.
@ -235,10 +232,10 @@ public:
void clear();
// Merge collection set candidates from marking into the current marking list
// Merge collection set candidates from marking into the current marking candidates
// (which needs to be empty).
void set_candidates_from_marking(G1CollectionSetCandidateInfo* candidate_infos,
uint num_infos);
void set_candidates_from_marking(G1HeapRegion** candidates,
uint num_candidates);
// The most recent length of the list that had been merged last via
// set_candidates_from_marking(). Used for calculating minimum collection set
// regions.


@ -31,15 +31,13 @@
#include "utilities/quickSort.hpp"
// Determine collection set candidates (from marking): For all regions determine
// whether they should be a collection set candidate, calculate their efficiency,
// sort and put them into the candidates.
// whether they should be a collection set candidate. Calculate their efficiency,
// sort, and put them into the collection set candidates.
//
// Threads calculate the GC efficiency of the regions they get to process, and
// put them into some work area without sorting. At the end that array is sorted and
// moved to the destination.
class G1BuildCandidateRegionsTask : public WorkerTask {
using CandidateInfo = G1CollectionSetCandidateInfo;
// Work area for building the set of collection set candidates. Contains references
// to heap regions with their GC efficiencies calculated. To reduce contention
// on claiming array elements, worker threads claim parts of this array in chunks;
@ -47,14 +45,40 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// up their chunks completely.
// Final sorting will remove them.
class G1BuildCandidateArray : public StackObj {
uint const _max_size;
uint const _chunk_size;
CandidateInfo* _data;
G1HeapRegion** _data;
uint volatile _cur_claim_idx;
static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
G1HeapRegion* r2 = *rr2;
// Make sure that null entries are moved to the end.
if (r1 == nullptr) {
if (r2 == nullptr) {
return 0;
} else {
return 1;
}
} else if (r2 == nullptr) {
return -1;
}
G1Policy* p = G1CollectedHeap::heap()->policy();
double gc_efficiency1 = p->predict_gc_efficiency(r1);
double gc_efficiency2 = p->predict_gc_efficiency(r2);
if (gc_efficiency1 > gc_efficiency2) {
return -1;
} else if (gc_efficiency1 < gc_efficiency2) {
return 1;
} else {
return 0;
}
}
// Calculates the maximum array size that will be used.
static uint required_array_size(uint num_regions, uint chunk_size, uint num_workers) {
uint const max_waste = num_workers * chunk_size;
@ -68,15 +92,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
_max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
_chunk_size(chunk_size),
_data(NEW_C_HEAP_ARRAY(CandidateInfo, _max_size, mtGC)),
_data(NEW_C_HEAP_ARRAY(G1HeapRegion*, _max_size, mtGC)),
_cur_claim_idx(0) {
for (uint i = 0; i < _max_size; i++) {
_data[i] = CandidateInfo();
_data[i] = nullptr;
}
}
~G1BuildCandidateArray() {
FREE_C_HEAP_ARRAY(CandidateInfo, _data);
FREE_C_HEAP_ARRAY(G1HeapRegion*, _data);
}
// Claim a new chunk, returning its bounds [from, to[.
@ -92,8 +116,8 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Set element in array.
void set(uint idx, G1HeapRegion* hr) {
assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
assert(_data[idx]._r == nullptr, "Value must not have been set.");
_data[idx] = CandidateInfo(hr);
assert(_data[idx] == nullptr, "Value must not have been set.");
_data[idx] = hr;
}
void sort_by_gc_efficiency() {
@ -101,15 +125,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
assert(_data[i]._r == nullptr, "must be");
assert(_data[i] == nullptr, "must be");
}
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)G1CollectionSetCandidateInfo::compare_region_gc_efficiency);
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = _cur_claim_idx; i < _max_size; i++) {
assert(_data[i]._r == nullptr, "must be");
assert(_data[i] == nullptr, "must be");
}
}
CandidateInfo* array() const { return _data; }
G1HeapRegion** array() const { return _data; }
};
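// The work-area scheme described in the comments above, in miniature: workers
// claim fixed-size chunks of a shared array with one atomic fetch-add, fill
// what they can, leave the rest null, and the final sort pushes the null slack
// to the end while ordering the rest by decreasing GC efficiency. Illustrative
// only -- ToyRegion, ToyCandidateArea and the stored efficiency value are
// invented for the example:
#include <algorithm>
#include <atomic>
#include <vector>
struct ToyRegion { double gc_efficiency; };
struct ToyCandidateArea {
  std::vector<ToyRegion*> _data;
  std::atomic<unsigned> _cur_claim_idx{0};
  unsigned _chunk_size;
  ToyCandidateArea(unsigned max_size, unsigned chunk_size)
    : _data(max_size, nullptr), _chunk_size(chunk_size) {}
  // Each worker gets a private [from, to[ window; no further synchronization needed.
  bool claim_chunk(unsigned& from, unsigned& to) {
    from = _cur_claim_idx.fetch_add(_chunk_size);
    if (from >= _data.size()) return false;
    to = std::min<unsigned>(from + _chunk_size, (unsigned)_data.size());
    return true;
  }
  // Null entries (unfilled slack of partially used chunks) sort last, the rest
  // in decreasing efficiency order.
  void sort_by_gc_efficiency() {
    std::sort(_data.begin(), _data.end(), [](ToyRegion* a, ToyRegion* b) {
      if (a == nullptr) return false;
      if (b == nullptr) return true;
      return a->gc_efficiency > b->gc_efficiency;
    });
  }
};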
// Per-region closure. In addition to determining whether a region should be
@ -193,7 +217,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// available (for forward progress in evacuation) or the waste accumulated by the
// removed regions is above the maximum allowed waste.
// Updates number of candidates and reclaimable bytes given.
void prune(CandidateInfo* data) {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
uint num_candidates = Atomic::load(&_num_regions_added);
@ -211,7 +235,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
uint max_to_prune = num_candidates - min_old_cset_length;
while (true) {
G1HeapRegion* r = data[num_candidates - num_pruned - 1]._r;
G1HeapRegion* r = data[num_candidates - num_pruned - 1];
size_t const reclaimable = r->reclaimable_bytes();
if (num_pruned >= max_to_prune ||
wasted_bytes + reclaimable > allowed_waste) {


@ -38,11 +38,11 @@ class WorkerThreads;
class G1CollectionSetChooser : public AllStatic {
static uint calculate_work_chunk_size(uint num_workers, uint num_regions);
public:
static size_t mixed_gc_live_threshold_bytes() {
return G1HeapRegion::GrainBytes * (size_t)G1MixedGCLiveThresholdPercent / 100;
}
public:
static bool region_occupancy_low_enough_for_evac(size_t live_bytes) {
return live_bytes < mixed_gc_live_threshold_bytes();
}
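// Worked example for the two helpers above, assuming 32 MB regions and the
// default G1MixedGCLiveThresholdPercent of 85: the threshold is
// 32 MB * 85 / 100 = 27.2 MB, so a region is a mixed-gc candidate only if less
// than 27.2 MB of it is live.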


@ -1962,7 +1962,8 @@ public:
};
void G1ConcurrentMark::verify_no_collection_set_oops() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
"should be at a safepoint or initializing");
if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
return;
}
@ -2981,6 +2982,7 @@ G1CMTask::G1CMTask(uint worker_id,
#define G1PPRL_LEN_FORMAT " " UINT32_FORMAT_W(14)
#define G1PPRL_LEN_H_FORMAT " %14s"
#define G1PPRL_GID_GCEFF_FORMAT " %14.1f"
#define G1PPRL_GID_LIVENESS_FORMAT " %9.2f"
// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
@ -3114,13 +3116,13 @@ void G1PrintRegionLivenessInfoClosure::log_cset_candidate_group_add_total(G1CSet
G1PPRL_GID_FORMAT
G1PPRL_LEN_FORMAT
G1PPRL_GID_GCEFF_FORMAT
G1PPRL_BYTE_FORMAT
G1PPRL_GID_LIVENESS_FORMAT
G1PPRL_BYTE_FORMAT
G1PPRL_TYPE_H_FORMAT,
group->group_id(),
group->length(),
group->gc_efficiency(),
group->liveness(),
group->liveness_percent(),
group->card_set()->mem_size(),
type);
_total_remset_bytes += group->card_set()->mem_size();

View File

@ -224,8 +224,6 @@ void G1FullCollector::collect() {
}
phase5_reset_metadata();
G1CollectedHeap::finish_codecache_marking_cycle();
}
void G1FullCollector::complete_collection(size_t allocation_word_size) {


@ -245,7 +245,7 @@ G1HeapRegion::G1HeapRegion(uint hrm_index,
_parsable_bottom(nullptr),
_garbage_bytes(0),
_incoming_refs(0),
_young_index_in_cset(-1),
_young_index_in_cset(InvalidCSetIndex),
_surv_rate_group(nullptr),
_age_index(G1SurvRateGroup::InvalidAgeIndex),
_node_index(G1NUMA::UnknownNodeIndex),


@ -496,10 +496,10 @@ public:
void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
uint young_index_in_cset() const { return _young_index_in_cset; }
uint young_index_in_cset() const { return _young_index_in_cset; }
void clear_young_index_in_cset() { _young_index_in_cset = 0; }
void set_young_index_in_cset(uint index) {
assert(index != UINT_MAX, "just checking");
assert(index != InvalidCSetIndex, "just checking");
assert(index != 0, "just checking");
assert(is_young(), "pre-condition");
_young_index_in_cset = index;


@ -97,10 +97,6 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
_free_regions_at_end_of_collection = _g1h->num_free_regions();
update_young_length_bounds();
// We immediately start allocating regions placing them in the collection set.
// Initialize the collection set info.
_collection_set->start_incremental_building();
}
void G1Policy::record_young_gc_pause_start() {


@ -1426,7 +1426,7 @@ void G1RemSet::merge_heap_roots(bool initial_evacuation) {
}
WorkerThreads* workers = g1h->workers();
size_t const increment_length = g1h->collection_set()->increment_length();
size_t const increment_length = g1h->collection_set()->regions_cur_length();
uint const num_workers = initial_evacuation ? workers->active_workers() :
MIN2(workers->active_workers(), (uint)increment_length);


@ -268,44 +268,39 @@ public:
return false;
}
void do_cset_groups() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CSetCandidateGroup* young_only_cset_group = g1h->young_regions_cset_group();
void accumulate_stats_for_group(G1CSetCandidateGroup* group, G1PerRegionTypeRemSetCounters* gen_counter) {
// If the group has only a single region, then stats were accumulated
// during region iteration.
if (young_only_cset_group->length() > 1) {
G1CardSet* young_only_card_set = young_only_cset_group->card_set();
size_t rs_mem_sz = young_only_card_set->mem_size();
size_t rs_unused_mem_sz = young_only_card_set->unused_mem_size();
size_t occupied_cards = young_only_card_set->occupied();
// during region iteration. Skip these.
if (group->length() > 1) {
G1CardSet* card_set = group->card_set();
_max_group_cardset_mem_sz = rs_mem_sz;
_max_cardset_mem_sz_group = young_only_cset_group;
size_t rs_mem_sz = card_set->mem_size();
size_t rs_unused_mem_sz = card_set->unused_mem_size();
size_t occupied_cards = card_set->occupied();
// Only update cardset details
_young.add(rs_unused_mem_sz, rs_mem_sz, occupied_cards, 0, 0, false);
if (rs_mem_sz > _max_group_cardset_mem_sz) {
_max_group_cardset_mem_sz = rs_mem_sz;
_max_cardset_mem_sz_group = group;
}
gen_counter->add(rs_unused_mem_sz, rs_mem_sz, occupied_cards, 0, 0, false);
_all.add(rs_unused_mem_sz, rs_mem_sz, occupied_cards, 0, 0, false);
}
}
void do_cset_groups() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1PerRegionTypeRemSetCounters* current = &_old;
for (G1CSetCandidateGroup* group : g1h->policy()->candidates()->from_marking_groups()) {
if (group->length() > 1) {
G1CardSet* group_card_set = group->card_set();
size_t rs_mem_sz = group_card_set->mem_size();
size_t rs_unused_mem_sz = group_card_set->unused_mem_size();
size_t occupied_cards = group_card_set->occupied();
accumulate_stats_for_group(g1h->young_regions_cset_group(), &_young);
if (rs_mem_sz > _max_group_cardset_mem_sz) {
_max_group_cardset_mem_sz = rs_mem_sz;
_max_cardset_mem_sz_group = group;
}
// Only update cardset details
_old.add(rs_unused_mem_sz, rs_mem_sz, occupied_cards, 0, 0, false);
_all.add(rs_unused_mem_sz, rs_mem_sz, occupied_cards, 0, 0, false);
}
G1CollectionSetCandidates* candidates = g1h->policy()->candidates();
for (G1CSetCandidateGroup* group : candidates->from_marking_groups()) {
accumulate_stats_for_group(group, &_old);
}
// Skip gathering statistics for retained regions. Just verify that they have
// the expected number of regions.
for (G1CSetCandidateGroup* group : candidates->retained_groups()) {
assert(group->length() == 1, "must be");
}
}


@ -271,7 +271,7 @@ void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, dou
allocator()->release_mutator_alloc_regions();
collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
evacuation_info->set_collection_set_regions(collection_set()->region_length() +
evacuation_info->set_collection_set_regions(collection_set()->initial_region_length() +
collection_set()->num_optional_regions());
concurrent_mark()->verify_no_collection_set_oops();


@ -887,7 +887,7 @@ public:
p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
}
double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->region_length(); }
double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->initial_region_length(); }
void set_max_workers(uint max_workers) override {
_active_workers = max_workers;


@ -32,6 +32,7 @@
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
@ -861,6 +862,8 @@ void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space
void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
ScavengableNMethods::register_nmethod(nm);
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
bs_nm->disarm(nm);
}
void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {


@ -34,6 +34,7 @@
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
@ -432,6 +433,8 @@ bool SerialHeap::do_young_collection(bool clear_soft_refs) {
void SerialHeap::register_nmethod(nmethod* nm) {
ScavengableNMethods::register_nmethod(nm);
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
bs_nm->disarm(nm);
}
void SerialHeap::unregister_nmethod(nmethod* nm) {


@ -201,34 +201,6 @@ void CollectedHeap::print_relative_to_gc(GCWhen::Type when) const {
}
}
class CPUTimeThreadClosure : public ThreadClosure {
private:
jlong _cpu_time = 0;
public:
virtual void do_thread(Thread* thread) {
jlong cpu_time = os::thread_cpu_time(thread);
if (cpu_time != -1) {
_cpu_time += cpu_time;
}
}
jlong cpu_time() { return _cpu_time; };
};
double CollectedHeap::elapsed_gc_cpu_time() const {
double string_dedup_cpu_time = UseStringDeduplication ?
os::thread_cpu_time((Thread*)StringDedup::_processor->_thread) : 0;
if (string_dedup_cpu_time == -1) {
string_dedup_cpu_time = 0;
}
CPUTimeThreadClosure cl;
gc_threads_do(&cl);
return (double)(cl.cpu_time() + _vmthread_cpu_time + string_dedup_cpu_time) / NANOSECS_PER_SEC;
}
void CollectedHeap::print_before_gc() const {
print_relative_to_gc(GCWhen::BeforeGC);
}
@ -633,36 +605,9 @@ void CollectedHeap::post_initialize() {
initialize_serviceability();
}
void CollectedHeap::log_gc_cpu_time() const {
LogTarget(Info, gc, cpu) out;
if (os::is_thread_cpu_time_supported() && out.is_enabled()) {
double process_cpu_time = os::elapsed_process_cpu_time();
double gc_cpu_time = elapsed_gc_cpu_time();
if (process_cpu_time == -1 || gc_cpu_time == -1) {
log_warning(gc, cpu)("Could not sample CPU time");
return;
}
double usage;
if (gc_cpu_time > process_cpu_time ||
process_cpu_time == 0 || gc_cpu_time == 0) {
// This can happen e.g. for short running processes with
// low CPU utilization
usage = 0;
} else {
usage = 100 * gc_cpu_time / process_cpu_time;
}
out.print("GC CPU usage: %.2f%% (Process: %.4fs GC: %.4fs)", usage, process_cpu_time, gc_cpu_time);
}
}
void CollectedHeap::before_exit() {
print_tracing_info();
// Log GC CPU usage.
log_gc_cpu_time();
// Stop any on-going concurrent work and prepare for exit.
stop();
}

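The hunk above deletes the GC CPU-time aggregation and the gc+cpu logging from CollectedHeap; the header hunk below adds a CPUTimeUsage::GC friend declaration, which suggests the accounting now lives there. For reference, a minimal standalone sketch of the usage computation that the removed log_gc_cpu_time() performed, with the same guards as the deleted code (the free function is illustrative, not part of the source):

```cpp
// Usage percentage as computed by the removed code; times are in seconds.
double gc_cpu_usage_percent(double process_cpu_time, double gc_cpu_time) {
  if (gc_cpu_time > process_cpu_time || process_cpu_time == 0 || gc_cpu_time == 0) {
    // Can happen e.g. for short-running processes with low CPU utilization.
    return 0.0;
  }
  return 100.0 * gc_cpu_time / process_cpu_time;
}
```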
View File

@ -36,6 +36,7 @@
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/cpuTimeUsage.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
@ -89,6 +90,7 @@ public:
// ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtGC> {
friend class CPUTimeUsage::GC;
friend class VMStructs;
friend class JVMCIVMStructs;
friend class IsSTWGCActiveMark; // Block structured external access to _is_stw_gc_active
@ -429,8 +431,6 @@ protected:
void print_relative_to_gc(GCWhen::Type when) const;
void log_gc_cpu_time() const;
public:
void pre_full_gc_dump(GCTimer* timer);
void post_full_gc_dump(GCTimer* timer);
@ -463,8 +463,6 @@ protected:
// Iterator for all GC threads (other than VM thread)
virtual void gc_threads_do(ThreadClosure* tc) const = 0;
double elapsed_gc_cpu_time() const;
void print_before_gc() const;
void print_after_gc() const;

View File

@ -103,9 +103,9 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "services/cpuTimeUsage.hpp"
#include "utilities/globalDefinitions.hpp"
class CollectedHeap;
class Klass;
class StringDedupThread;
class ThreadClosure;
@ -116,7 +116,7 @@ class ThreadClosure;
// feature. Other functions in the StringDedup class are called where
// needed, without requiring GC-specific code.
class StringDedup : public AllStatic {
friend class CollectedHeap;
friend class CPUTimeUsage::GC;
friend class StringDedupThread;
class Config;

View File

@ -27,9 +27,9 @@
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "memory/allocation.hpp"
#include "services/cpuTimeUsage.hpp"
#include "utilities/macros.hpp"
class CollectedHeap;
class JavaThread;
class OopStorage;
@ -43,7 +43,7 @@ class OopStorage;
// incremental operations for resizing and for removing dead entries, so
// safepoint checks can be performed between steps in those operations.
class StringDedup::Processor : public CHeapObj<mtGC> {
friend class CollectedHeap;
friend class CPUTimeUsage::GC;
Processor();
~Processor() = default;

View File

@ -250,9 +250,8 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
} __ else_(); {
// logging buffer is full, call the runtime
const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), "shenandoah_wb_pre",
pre_val, tls);
const TypeFunc *tf = ShenandoahBarrierSetC2::write_barrier_pre_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), "shenandoah_wb_pre", pre_val);
} __ end_if(); // (!index)
} __ end_if(); // (pre_val != nullptr)
} __ end_if(); // (!marking)
@ -270,7 +269,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
return call->is_CallLeaf() &&
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre);
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre);
}
bool ShenandoahBarrierSetC2::is_shenandoah_clone_call(Node* call) {
@ -520,11 +519,10 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
#undef __
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_Type() {
const Type **fields = TypeTuple::fields(2);
const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() {
const Type **fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
// create result type (range)
fields = TypeTuple::fields(0);
@ -1108,7 +1106,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
if (is_shenandoah_wb_pre_call(n)) {
uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_Type()->domain()->cnt();
uint cnt = ShenandoahBarrierSetC2::write_barrier_pre_Type()->domain()->cnt();
if (n->req() > cnt) {
Node* addp = n->in(cnt);
if (has_only_shenandoah_wb_pre_uses(addp)) {
@ -1194,7 +1192,7 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
assert (n->is_Call(), "");
CallNode *call = n->as_Call();
if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_Type()->domain()->cnt();
uint cnt = ShenandoahBarrierSetC2::write_barrier_pre_Type()->domain()->cnt();
if (call->req() > cnt) {
assert(call->req() == cnt + 1, "only one extra input");
Node *addp = call->in(cnt);

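The C2 hunk above renames the pre-barrier slow-path TypeFunc and drops the thread argument, so the generated runtime call carries only the captured pre-value. A hedged sketch of the reduced signature, following the shape shown in the hunk (the range construction is assumed to follow the usual C2 pattern and is not part of the shown diff):

```cpp
// One NOTNULL oop parameter (the original field value), no thread pointer,
// and no return value.
static const TypeFunc* write_barrier_pre_Type_sketch() {
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL;      // original field value
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 1, fields);

  fields = TypeTuple::fields(0);                           // void result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, fields);

  return TypeFunc::make(domain, range);
}
```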
View File

@ -103,7 +103,7 @@ public:
ShenandoahBarrierSetC2State* state() const;
static const TypeFunc* write_ref_field_pre_Type();
static const TypeFunc* write_barrier_pre_Type();
static const TypeFunc* clone_barrier_Type();
static const TypeFunc* load_reference_barrier_Type();
virtual bool has_load_barrier_nodes() const { return true; }

View File

@ -38,20 +38,16 @@ JRT_LEAF(void, ShenandoahRuntime::arraycopy_barrier_narrow_oop(narrowOop* src, n
ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src, dst, length);
JRT_END
JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre(oopDesc * orig, JavaThread * thread))
assert(thread == JavaThread::current(), "pre-condition");
JRT_LEAF(void, ShenandoahRuntime::write_barrier_pre(oopDesc* orig))
assert(orig != nullptr, "should be optimized out");
shenandoah_assert_correct(nullptr, orig);
// Capture the original value that was in the field reference.
JavaThread* thread = JavaThread::current();
assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise");
SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
ShenandoahBarrierSet::satb_mark_queue_set().enqueue_known_active(queue, orig);
JRT_END
void ShenandoahRuntime::write_barrier_pre(oopDesc* orig) {
write_ref_field_pre(orig, JavaThread::current());
}
JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_strong(oopDesc* src, oop* load_addr))
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr);
JRT_END

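On the runtime side, the hunk above collapses the two entry points into a single JRT_LEAF that takes only the pre-value and resolves the current thread itself. A minimal sketch of the new control flow, written as a plain function rather than the JRT_LEAF macro for readability:

```cpp
// Enqueue the captured pre-value on the current thread's SATB queue.
void write_barrier_pre_sketch(oopDesc* orig) {
  assert(orig != nullptr, "should be optimized out");
  JavaThread* thread = JavaThread::current();   // no longer passed as an argument
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  ShenandoahBarrierSet::satb_mark_queue_set().enqueue_known_active(queue, orig);
}
```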
View File

@ -36,7 +36,6 @@ public:
static void arraycopy_barrier_oop(oop* src, oop* dst, size_t length);
static void arraycopy_barrier_narrow_oop(narrowOop* src, narrowOop* dst, size_t length);
static void write_ref_field_pre(oopDesc* orig, JavaThread* thread);
static void write_barrier_pre(oopDesc* orig);
static oopDesc* load_reference_barrier_strong(oopDesc* src, oop* load_addr);

View File

@ -24,8 +24,8 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfr.hpp"
#include "jfr/dcmd/jfrDcmds.hpp"
#include "jfr/jfr.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"

View File

@ -28,8 +28,8 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "jfr/instrumentation/jfrClassTransformer.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"

View File

@ -26,18 +26,16 @@
#include "jfr/jfr.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "jfr/support/jfrResolution.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/support/methodtracer/jfrMethodTracer.hpp"
#include "jfr/support/methodtracer/jfrTraceTagging.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.hpp"
#include "runtime/java.hpp"

View File

@ -39,11 +39,11 @@
#include "memory/resourceArea.hpp"
#include "oops/instanceOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.hpp"
@ -52,7 +52,6 @@
#include "runtime/synchronizer.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/growableArray.hpp"
#include "classfile/vmSymbols.hpp"
#ifdef ASSERT
static void check_java_thread_state(JavaThread* t, JavaThreadState state) {

View File

@ -22,19 +22,24 @@
*
*/
#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
#include "jfr/jfr.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrJniMethodRegistration.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/jfrEventSetting.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/jfrEventSetting.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrChunk.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/repository/jfrChunkRotation.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrEventThrottler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrRecorderService.hpp"
@ -42,18 +47,13 @@
#include "jfr/recorder/stacktrace/jfrStackFilterRegistry.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/recorder/stringpool/jfrStringPool.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrJniMethodRegistration.hpp"
#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/support/jfrDeprecationManager.hpp"
#include "jfr/support/jfrJdkJfrEvent.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/methodtracer/jfrMethodTracer.hpp"
#include "jfr/utilities/jfrJavaLog.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
#include "jfrfiles/jfrPeriodic.hpp"
#include "jfrfiles/jfrTypes.hpp"
@ -67,8 +67,8 @@
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#ifdef LINUX
#include "osContainer_linux.hpp"
#include "os_linux.hpp"
#include "osContainer_linux.hpp"
#endif
#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header {

View File

@ -24,8 +24,8 @@
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"

View File

@ -25,9 +25,9 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGEQUEUE_HPP
#include "memory/allocation.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/allocation.hpp"
class JfrVirtualMemory;

View File

@ -24,7 +24,6 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
@ -32,12 +31,11 @@
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"

View File

@ -25,8 +25,8 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
#include "memory/allocation.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "memory/allocation.hpp"
typedef u8 traceid;

View File

@ -39,8 +39,8 @@
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/jfrMethodLookup.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "jfr/utilities/jfrSet.hpp"
#include "jfr/utilities/jfrRelation.hpp"
#include "jfr/utilities/jfrSet.hpp"
#include "memory/resourceArea.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"

View File

@ -25,8 +25,8 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
#include "memory/allStatic.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allStatic.hpp"
class EdgeStore;
class InstanceKlass;

View File

@ -22,7 +22,6 @@
*
*/
#include "jfrfiles/jfrTypes.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
@ -34,6 +33,7 @@
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/writers/jfrTypeWriterHost.hpp"
#include "jfrfiles/jfrTypes.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "utilities/growableArray.hpp"

View File

@ -27,13 +27,13 @@
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "memory/iterator.hpp"
#include "prims/jvmtiDeferredUpdates.hpp"
#include "oops/klass.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiDeferredUpdates.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.hpp"

View File

@ -22,11 +22,11 @@
*
*/
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/startOperation.hpp"
#include "jfr/leakprofiler/stopOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"

View File

@ -31,11 +31,11 @@
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/sampling/sampleList.hpp"
#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/utilities/jfrSignal.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrSignal.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"

View File

@ -25,9 +25,9 @@
#ifndef SHARE_JFR_METADATA_JFRSERIALIZER_HPP
#define SHARE_JFR_METADATA_JFRSERIALIZER_HPP
#include "memory/allocation.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfrfiles/jfrTypes.hpp"
#include "memory/allocation.hpp"
/*
* A "type" in Jfr is a binary relation defined by enumerating a set of <key, value> ordered pairs:

View File

@ -22,13 +22,13 @@
*
*/
#include "logging/log.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/periodic/jfrNetworkUtilization.hpp"
#include "jfr/periodic/jfrOSInterface.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "logging/log.hpp"
#include "runtime/os_perf.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

View File

@ -38,11 +38,11 @@
#include "jfr/periodic/jfrCompilerQueueUtilization.hpp"
#include "jfr/periodic/jfrFinalizerStatisticsEvent.hpp"
#include "jfr/periodic/jfrModuleEvent.hpp"
#include "jfr/periodic/jfrNativeMemoryEvent.hpp"
#include "jfr/periodic/jfrNetworkUtilization.hpp"
#include "jfr/periodic/jfrOSInterface.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/periodic/jfrThreadDumpEvent.hpp"
#include "jfr/periodic/jfrNativeMemoryEvent.hpp"
#include "jfr/periodic/jfrNetworkUtilization.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "jfr/utilities/jfrTime.hpp"
@ -61,8 +61,8 @@
#include "runtime/os_perf.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_version.hpp"
#include "runtime/vmThread.hpp"
#include "services/classLoadingService.hpp"
#include "services/management.hpp"
#include "services/memoryPool.hpp"

View File

@ -22,14 +22,14 @@
*
*/
#include "logging/log.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "logging/log.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
jlong JfrThreadCPULoadEvent::get_wallclock_time() {
return os::javaTimeNanos();

View File

@ -29,8 +29,8 @@
#if defined(LINUX)
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "memory/resourceArea.hpp"
@ -41,9 +41,8 @@
#include "runtime/threadSMR.hpp"
#include "runtime/vmOperation.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"
#include "signals_posix.hpp"
#include "utilities/ticks.hpp"
static const int64_t RECOMPUTE_INTERVAL_MS = 100;

View File

@ -23,11 +23,11 @@
*/
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/periodic/sampling/jfrSampleMonitor.hpp"
#include "jfr/periodic/sampling/jfrSampleRequest.hpp"
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "jfr/utilities/jfrTypes.hpp"

View File

@ -25,8 +25,8 @@
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrUpcalls.hpp"
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "classfile/javaClasses.inline.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jvmtifiles/jvmti.h"
#include "runtime/javaThread.hpp"

View File

@ -27,9 +27,9 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "oops/instanceKlass.hpp"

View File

@ -22,8 +22,8 @@
*
*/
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrEpochQueue.inline.hpp"

View File

@ -25,9 +25,9 @@
#ifndef SHARE_JFR_RECORDER_JFREVENTSETTING_HPP
#define SHARE_JFR_RECORDER_JFREVENTSETTING_HPP
#include "jni.h"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfrfiles/jfrEventControl.hpp"
#include "jni.h"
//
// Native event settings as an associative array using the event id as key.

View File

@ -31,17 +31,17 @@
#include "jfr/periodic/jfrOSInterface.hpp"
#include "jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrEventThrottler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrPostBox.hpp"
#include "jfr/recorder/service/jfrRecorderService.hpp"
#include "jfr/recorder/service/jfrRecorderThread.hpp"
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/recorder/stringpool/jfrStringPool.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrTime.hpp"
@ -49,8 +49,8 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/growableArray.hpp"
#ifdef ASSERT
#include "prims/jvmtiEnvBase.hpp"

View File

@ -26,8 +26,8 @@
#ifndef SHARE_JFR_RECORDER_SERVICE_JFREVENTTHROTTLER_HPP
#define SHARE_JFR_RECORDER_SERVICE_JFREVENTTHROTTLER_HPP
#include "jfrfiles/jfrEventIds.hpp"
#include "jfr/support/jfrAdaptiveSampler.hpp"
#include "jfrfiles/jfrEventIds.hpp"
class JfrEventThrottler : public JfrAdaptiveSampler {
friend class JfrRecorder;

View File

@ -22,14 +22,13 @@
*
*/
#include "jfrfiles/jfrEventClasses.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrChunkRotation.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
@ -43,8 +42,9 @@
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"

View File

@ -33,8 +33,8 @@
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
assert(thread_oop.not_null(), "invariant");

View File

@ -27,7 +27,6 @@
#include "jfr/recorder/service/jfrPostBox.hpp"
#include "jfr/recorder/service/jfrRecorderService.hpp"
#include "jfr/recorder/service/jfrRecorderThread.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "logging/log.hpp"
#include "runtime/handles.hpp"
#include "runtime/interfaceSupport.inline.hpp"

View File

@ -25,8 +25,8 @@
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTCKFILTERREGISTRY_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTCKFILTERREGISTRY_HPP
#include "jni.h"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jni.h"
class JavaThread;
class JfrStackFilter;

View File

@ -24,8 +24,8 @@
#include "jfr/jfrEvents.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/service/jfrPostBox.hpp"

View File

@ -25,8 +25,8 @@
#ifndef SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/javaThread.hpp"

View File

@ -27,9 +27,9 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
#include "jfr/recorder/stringpool/jfrStringPool.hpp"
#include "jfr/recorder/stringpool/jfrStringPoolWriter.hpp"

View File

@ -25,11 +25,11 @@
#ifndef SHARE_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLWRITER_HPP
#define SHARE_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLWRITER_HPP
#include "memory/allocation.hpp"
#include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
#include "jfr/writers/jfrEventWriterHost.hpp"
#include "jfr/writers/jfrMemoryWriterHost.hpp"
#include "jfr/writers/jfrStorageAdapter.hpp"
#include "memory/allocation.hpp"
class Thread;

Some files were not shown because too many files have changed in this diff.