mirror of https://github.com/openjdk/jdk.git

Merge branch 'master' into user/missa-prime/avx10_2
commit 09d1e44d77
@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=

UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
CHECK_AVAILABLE: [
if test x$TOOLCHAIN_TYPE = xmicrosoft; then
AVAILABLE=false
fi
],
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)

if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
DEFAULT_DESC: [toolchain default],
IF_AUTO: [
RESULT=""
])

# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi

# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])

# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
zero-as-null-pointer-constant, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \

@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################

$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))

TARGETS += $(BUILD_LIBJAAS)
ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))

TARGETS += $(BUILD_LIBJAAS)
endif
################################################################################

@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;

{ // Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static

__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();

if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);

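Across the platform-specific adapter generators in this commit, the runtime check on VM_Version::supports_fast_class_init_checks() is replaced by an assert and the class-initialization barrier is emitted unconditionally. Below is a minimal C++ sketch of that control-flow reshaping; the helper functions are hypothetical stand-ins for the real VM_Version and MacroAssembler plumbing, shown only to illustrate the shape of the refactor.

```cpp
#include <cassert>

// Hypothetical stand-ins; only the control-flow shape is illustrated here.
bool supports_fast_class_init_checks() { return true; }
bool is_static_method(int access_flags) { return (access_flags & 0x0008) != 0; } // JVM_ACC_STATIC

// Old shape: the barrier was emitted only inside
//   if (supports_fast_class_init_checks()) { ... }
// New shape (as in this change): availability is asserted up front and the
// barrier is always emitted, which removes one level of nesting.
void emit_c2i_entry(int access_flags) {
  assert(supports_fast_class_init_checks() && "sanity");
  if (!is_static_method(access_flags)) {
    return; // bypass the barrier for non-static methods (L_skip_barrier)
  }
  // ... load the method holder, emit the clinit barrier, and jump to the
  // handle_wrong_method stub on the slow path ...
}
```

The same reshaping repeats in the ppc, riscv, s390 and x86 hunks later in this commit.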
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?

// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?

// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;

__ br(Assembler::NE, L_clinit_barrier_slow);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {

address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;

if(pc == 0) {
if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {

void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
if (pc == 0) {
if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:

intptr_t data() const;
void set_data(intptr_t x, address pc = 0);
void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;

{ // Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static

Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();

__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();

if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.

// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;

@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.

// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;

// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not ovlerlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
return UseRVV;
return UseRVV && size > 0;
}

// ---------------------------------------------------------------------------
@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;

{ // Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static

__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}

if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -2192,7 +2192,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);

// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@ -2243,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?

// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;

__ bne(temp, t0, L_clinit_barrier_slow);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();

if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;

{ // Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static

Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);

__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);

__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);

// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);

// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;

__ z_brne(L_clinit_barrier_slow);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
Register method = rbx;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register method = rbx;

{ // Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static

Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path

__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

int vep_offset = ((intptr_t)__ pc()) - start;

if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}

#endif // INCLUDE_JFR

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?

// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;

@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?

// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;

__ jcc(Assembler::notEqual, L_clinit_barrier_slow);

@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
return 0;
return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);

@ -628,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());

return 0;
return nullptr;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
@ -1420,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}

@ -188,7 +188,7 @@ class CachedMetric : public CHeapObj<mtInternal>{
volatile jlong _next_check_counter;
public:
CachedMetric() {
_metric = value_unlimited;
_metric = static_cast<MetricType>(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {

@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
if (unit[0] != '\0') {
st->print_cr(" %s", unit);
} else {
st->print_cr("");
}

const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
const char* unit_prefix = unit[0] != '\0' ? " " : "";

char line[128] = {};
os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
st->print_cr("%s", line);
}

void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {

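The reworked print_container_metric builds the whole padded line in one buffer and prints it with a single call, instead of a conditional second print for the unit. A small, self-contained sketch of that formatting approach follows; the column width of 30 and the metric names are illustrative and not taken from the JDK source.

```cpp
#include <cstdio>
#include <cstring>

// Right-align the value to a fixed column and append the optional unit in the
// same formatted line, mirroring the shape of the change above.
static void print_metric(const char* metric, const char* value, const char* unit) {
  const int max_length = 30;                                                 // illustrative column width
  const int pad_width  = max_length - static_cast<int>(strlen(metric)) - 2;  // -2 for ": "
  const char* unit_prefix = (unit[0] != '\0') ? " " : "";

  char line[128] = {};
  snprintf(line, sizeof(line), "%s: %*s%s%s", metric, pad_width, value, unit_prefix, unit);
  puts(line);
}

int main() {
  print_metric("cpu_quota", "-1", "");
  print_metric("memory_limit_in_bytes", "536870912", "bytes");
}
```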
@ -4963,9 +4963,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;

int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
// No further checking is needed if open() returned an error or
// access mode is not read only.
if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
return fd;
}

//If the open succeeded, the file might still be a directory
// If the open succeeded and is read only, the file might be a directory
// which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);

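The os::open change narrows the directory probe to read-only opens: any open() failure or non-O_RDONLY access mode returns immediately, and only read-only descriptors are checked with fstat. A hedged POSIX sketch of that shape, not the full HotSpot implementation; the directory handling here is illustrative.

```cpp
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

// Sketch: early return on error or non-read-only mode, then reject directories.
int open_checked(const char* path, int oflag, int mode) {
  int fd = ::open(path, oflag, mode);
  // No further checking is needed if open() failed or the access mode is not read only.
  if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
    return fd;
  }
  struct stat buf;
  if (::fstat(fd, &buf) == 0 && S_ISDIR(buf.st_mode)) {
    ::close(fd);  // reading a directory is not allowed here
    return -1;
  }
  return fd;
}
```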
@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
} else {
if (!successful_write) {
remove(destfile);
}
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
remove(filename);
break;
}
}

@ -571,7 +571,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
return set_to_null;
}
if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
ResourceMark rm;
aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -209,6 +209,17 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}

// Do not interfere with GC-Pressure driven heap resizing unless the user
// explicitly sets otherwise. G1 heap sizing should be free to grow or shrink
// the heap based on GC pressure, rather than being forced to satisfy
// MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
}
if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
}

// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll

@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {

template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field) {
volatile CardValue* byte = _card_table->byte_for(field);
// Make sure that the card table reference is read only once. Otherwise the compiler
// might reload that value in the two accesses below, that could cause writes to
// the wrong card table.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardValue* byte = card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}

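The barrier fix above hinges on reading the card table pointer exactly once, so that byte_for() and the dirty-card store cannot observe two different tables if the pointer is swapped concurrently. A sketch of the same load-once pattern, using std::atomic in place of HotSpot's AtomicAccess wrapper; the card size and the global are illustrative, not HotSpot names.

```cpp
#include <atomic>
#include <cstdint>

using CardValue = uint8_t;

struct CardTable {
  CardValue* _base;
  CardValue* byte_for(const void* addr) const {
    return _base + (reinterpret_cast<uintptr_t>(addr) >> 9);  // 512-byte cards (illustrative)
  }
};

std::atomic<CardTable*> g_card_table{nullptr};  // illustrative global, may be swapped by another thread

void write_ref_field_post(void* field) {
  // Read the table pointer once into a local so both uses below see the same table.
  CardTable* table = g_card_table.load(std::memory_order_relaxed);
  CardValue* byte = table->byte_for(field);
  if (*byte == 0 /* clean */) {
    *byte = 1 /* dirty */;
  }
}
```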
@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"

void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@ -40,7 +39,7 @@ const char* G1AbstractSubTask::name() const {
}

bool G1BatchedTask::try_claim_serial_task(int& task) {
task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
task = _num_serial_tasks_done.fetch_then_add(1);
return task < _serial_tasks.length();
}

@ -96,8 +95,8 @@ void G1BatchedTask::work(uint worker_id) {
}

G1BatchedTask::~G1BatchedTask() {
assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
"Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
assert(_num_serial_tasks_done.load_relaxed() >= _serial_tasks.length(),
"Only %d tasks of %d claimed", _num_serial_tasks_done.load_relaxed(), _serial_tasks.length());

for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;

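After the switch to an Atomic<int> member, serial sub-tasks are still claimed by a fetch-then-add on the counter, with the returned index valid only while it is below the number of serial tasks. A compact sketch with std::atomic standing in for HotSpot's Atomic<T>; the names only loosely mirror G1BatchedTask.

```cpp
#include <atomic>

struct SerialTaskClaimer {
  std::atomic<int> _num_serial_tasks_done{0};
  int _num_serial_tasks;

  explicit SerialTaskClaimer(int n) : _num_serial_tasks(n) {}

  // Each worker bumps the counter and owns the returned index if it is still
  // inside the serial-task list; later claims simply fall through.
  bool try_claim_serial_task(int& task) {
    task = _num_serial_tasks_done.fetch_add(1, std::memory_order_relaxed);
    return task < _num_serial_tasks;
  }
};
```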
@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

template <typename E, MemTag MT>
class GrowableArrayCHeap;
@ -120,7 +121,7 @@ public:
// 5) ~T()
//
class G1BatchedTask : public WorkerTask {
volatile int _num_serial_tasks_done;
Atomic<int> _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;

bool try_claim_serial_task(int& task);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {

void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
_card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}

void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = 0;
_card_claims[i].store_relaxed(0);
}
}

void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
_card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

class G1HeapRegionClosure;

@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {

// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
uint volatile* _card_claims;
Atomic<uint>* _card_claims;

uint _cards_per_chunk; // For conversion between card index and chunk index.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,26 +29,25 @@

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"

bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}

void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
AtomicAccess::store(&_card_claims[region], 0u);
_card_claims[region].store_relaxed(0u);
}

uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_all_cards(uint region) {

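The claim table keeps one atomic counter per region; claimers advance it by a card increment or a whole chunk and own the half-open range starting at the returned value, as long as that value is still below CardsPerRegion. A sketch with illustrative constants, again using std::atomic rather than HotSpot's Atomic<uint>.

```cpp
#include <atomic>
#include <cstdint>

constexpr uint32_t CardsPerRegion = 4096;  // illustrative, not the G1 value
constexpr uint32_t CardsPerChunk  = 128;   // illustrative chunk size

struct ClaimTable {
  std::atomic<uint32_t>* _claims;  // one claim counter per region

  bool has_unclaimed_cards(uint32_t region) const {
    return _claims[region].load(std::memory_order_relaxed) < CardsPerRegion;
  }

  // Returns the first card index of the claimed chunk; the caller checks
  // whether it is still within the region before processing it.
  uint32_t claim_chunk(uint32_t region) {
    return _claims[region].fetch_add(CardsPerChunk, std::memory_order_relaxed);
  }
};
```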
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"

@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;

size_t volatile _num_entries;
Atomic<size_t> _num_entries;

bool is_empty() const { return number_of_entries() == 0; }

@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
AtomicAccess::inc(&_num_entries);
_num_entries.add_then_fetch(1u);
}
if (grow_hint) {
_table.grow(Thread::current());
@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
AtomicAccess::dec(&_num_entries);
_num_entries.sub_then_fetch(1u);
}
return removed;
}
@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");

if (num_deleted != 0) {
size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
size_t current_size = _num_entries.sub_then_fetch(num_deleted);
shrink_to_match(current_size);
}
}
@ -226,7 +226,7 @@ public:

size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }

size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
size_t number_of_entries() const { return _num_entries.load_relaxed(); }
};

uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {

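The code root set keeps an entry count beside the concurrent hash table: successful inserts and removes adjust it, and bulk cleanup feeds the updated count into the shrink heuristic. A minimal sketch of that bookkeeping with std::atomic; the table itself and the shrink policy are elided.

```cpp
#include <atomic>
#include <cstddef>

class EntryCounter {
  std::atomic<size_t> _num_entries{0};
public:
  void on_insert() { _num_entries.fetch_add(1, std::memory_order_relaxed); }
  void on_remove() { _num_entries.fetch_sub(1, std::memory_order_relaxed); }

  // Bulk deletion returns the new count so the caller can decide whether to shrink.
  size_t on_bulk_delete(size_t num_deleted) {
    return _num_entries.fetch_sub(num_deleted, std::memory_order_relaxed) - num_deleted;
  }

  size_t number_of_entries() const { return _num_entries.load(std::memory_order_relaxed); }
};
```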
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;

volatile uint _cur_claim;
Atomic<uint> _cur_claim;

// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there
@ -1267,7 +1268,6 @@ public:

bool is_marked(oop obj) const;

inline static bool is_obj_filler(const oop obj);
// Determine if an object is dead, given the object and also
// the region to which the object belongs.
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,10 +38,10 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"

@ -53,10 +53,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {

inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}
@ -230,16 +230,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}

inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
Klass* k = obj->klass_without_asserts();
return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
if (hr->is_in_parsable_area(obj)) {
// This object is in the parsable part of the heap, live unless scrubbed.
return is_obj_filler(obj);
return is_filler_object(obj);
} else {
// From Remark until a region has been concurrently scrubbed, parts of the
// region is not guaranteed to be parsable. Use the bitmap for liveness.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"

// Determine collection set candidates (from marking): For all regions determine
@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

G1HeapRegion** _data;

uint volatile _cur_claim_idx;
Atomic<uint> _cur_claim_idx;

static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}

void sort_by_gc_efficiency() {
if (_cur_claim_idx == 0) {
uint length = _cur_claim_idx.load_relaxed();
if (length == 0) {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = _cur_claim_idx; i < _max_size; i++) {
qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -51,6 +51,9 @@
|
||||
#include "gc/shared/gcTimer.hpp"
|
||||
#include "gc/shared/gcTraceTime.inline.hpp"
|
||||
#include "gc/shared/gcVMOperations.hpp"
|
||||
#include "gc/shared/partialArraySplitter.inline.hpp"
|
||||
#include "gc/shared/partialArrayState.hpp"
|
||||
#include "gc/shared/partialArrayTaskStats.hpp"
|
||||
#include "gc/shared/referencePolicy.hpp"
|
||||
#include "gc/shared/suspendibleThreadSet.hpp"
|
||||
#include "gc/shared/taskqueue.inline.hpp"
|
||||
@ -67,7 +70,6 @@
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
@ -76,6 +78,7 @@
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "runtime/threads.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
@ -99,7 +102,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
|
||||
// We move that task's local finger along.
|
||||
_task->move_finger_to(addr);
|
||||
|
||||
_task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr)));
|
||||
_task->process_entry(G1TaskQueueEntry(cast_to_oop(addr)), false /* stolen */);
|
||||
// we only partially drain the local queue and global stack
|
||||
_task->drain_local_queue(true);
|
||||
_task->drain_global_stack(true);
|
||||
@ -148,25 +151,25 @@ bool G1CMMarkStack::initialize() {
|
||||
}
|
||||
|
||||
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
|
||||
if (_size >= _max_capacity) {
|
||||
if (_size.load_relaxed() >= _max_capacity) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
|
||||
size_t cur_idx = _size.fetch_then_add(1u);
|
||||
|
||||
if (cur_idx >= _max_capacity) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
size_t bucket = get_bucket(cur_idx);
|
||||
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
|
||||
if (_buckets[bucket].load_acquire() == nullptr) {
|
||||
if (!_should_grow) {
|
||||
// Prefer to restart the CM.
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
|
||||
if (_buckets[bucket].load_acquire() == nullptr) {
|
||||
size_t desired_capacity = bucket_size(bucket) * 2;
|
||||
if (!try_expand_to(desired_capacity)) {
|
||||
return nullptr;
|
||||
@ -175,7 +178,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
|
||||
}
|
||||
|
||||
size_t bucket_idx = get_bucket_index(cur_idx);
|
||||
TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
|
||||
TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
|
||||
result->next = nullptr;
|
||||
return result;
|
||||
}
|
||||
@ -197,10 +200,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
|
||||
_max_capacity = max_capacity;
|
||||
_num_buckets = get_bucket(_max_capacity) + 1;
|
||||
|
||||
_buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
|
||||
_buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);
|
||||
|
||||
for (size_t i = 0; i < _num_buckets; i++) {
|
||||
_buckets[i] = nullptr;
|
||||
_buckets[i].store_relaxed(nullptr);
|
||||
}
|
||||
|
||||
size_t new_capacity = bucket_size(0);
|
||||
@ -240,9 +243,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < _num_buckets; i++) {
|
||||
if (_buckets[i] != nullptr) {
|
||||
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
|
||||
_buckets[i] = nullptr;
|
||||
if (_buckets[i].load_relaxed() != nullptr) {
|
||||
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
|
||||
_buckets[i].store_relaxed(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -259,7 +262,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}

@ -279,7 +282,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
AtomicAccess::release_store(&_buckets[i], bucket_base);
_buckets[i].release_store(bucket_base);
}
return true;
}
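Note on the ChunkAllocator hunks above: they swap raw volatile fields and AtomicAccess free functions for the Atomic<T> wrapper while keeping the same ordering, that is relaxed loads of the size counter, fetch_then_add to claim an index, and acquire loads paired with the release_store publication of lazily allocated buckets. The following is a minimal standalone sketch of that double-checked publication pattern written with std::atomic and std::mutex rather than HotSpot's Atomic<T> and MutexLocker; the class name, constants, and new[]-based allocation are illustrative only, not part of the patch.

#include <atomic>
#include <cstddef>
#include <mutex>

struct Chunk { Chunk* next = nullptr; };

class LazyBuckets {
  static constexpr size_t kNumBuckets = 32;
  static constexpr size_t kBucketSize = 1024;

  // Zero-initialized slots (guaranteed since C++20); nullptr means "not allocated yet".
  std::atomic<Chunk*> _buckets[kNumBuckets] = {};
  std::mutex _expand_lock;

public:
  Chunk* get_bucket(size_t idx) {
    // Fast path: the acquire load pairs with the release store below, so a
    // non-null pointer guarantees the bucket's contents are visible.
    Chunk* b = _buckets[idx].load(std::memory_order_acquire);
    if (b != nullptr) {
      return b;
    }
    std::lock_guard<std::mutex> guard(_expand_lock);
    // Re-check under the lock; another thread may already have published it.
    b = _buckets[idx].load(std::memory_order_acquire);
    if (b == nullptr) {
      b = new Chunk[kBucketSize];                         // allocate backing storage
      _buckets[idx].store(b, std::memory_order_release);  // publish
    }
    return b;
  }

  ~LazyBuckets() {
    for (auto& slot : _buckets) {
      delete[] slot.load(std::memory_order_relaxed);
    }
  }
};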
@ -490,6 +493,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
|
||||
|
||||
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
|
||||
_terminator(_max_num_tasks, _task_queues),
|
||||
_partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
|
||||
|
||||
_first_overflow_barrier_sync(),
|
||||
_second_overflow_barrier_sync(),
|
||||
@ -556,6 +560,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
|
||||
reset_at_marking_complete();
|
||||
}
|
||||
|
||||
PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
|
||||
return _partial_array_state_manager;
|
||||
}
|
||||
|
||||
void G1ConcurrentMark::reset() {
|
||||
_has_aborted = false;
|
||||
|
||||
@ -650,7 +658,26 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurr
|
||||
}
|
||||
}
|
||||
|
||||
#if TASKQUEUE_STATS
|
||||
void G1ConcurrentMark::print_and_reset_taskqueue_stats() {
|
||||
|
||||
_task_queues->print_and_reset_taskqueue_stats("G1ConcurrentMark Oop Queue");
|
||||
|
||||
auto get_pa_stats = [&](uint i) {
|
||||
return _tasks[i]->partial_array_task_stats();
|
||||
};
|
||||
|
||||
PartialArrayTaskStats::log_set(_max_num_tasks, get_pa_stats,
|
||||
"G1ConcurrentMark Partial Array Task Stats");
|
||||
|
||||
for (uint i = 0; i < _max_num_tasks; ++i) {
|
||||
get_pa_stats(i)->reset();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void G1ConcurrentMark::reset_at_marking_complete() {
|
||||
TASKQUEUE_STATS_ONLY(print_and_reset_taskqueue_stats());
|
||||
// We set the global marking state to some default values when we're
|
||||
// not doing marking.
|
||||
reset_marking_for_restart();
|
||||
@ -804,11 +831,25 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
|
||||
|
||||
clear_bitmap(_concurrent_workers, true);
|
||||
|
||||
reset_partial_array_state_manager();
|
||||
|
||||
// Repeat the asserts from above.
|
||||
guarantee(cm_thread()->in_progress(), "invariant");
|
||||
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
|
||||
}
|
||||
|
||||
void G1ConcurrentMark::reset_partial_array_state_manager() {
|
||||
for (uint i = 0; i < _max_num_tasks; ++i) {
|
||||
_tasks[i]->unregister_partial_array_splitter();
|
||||
}
|
||||
|
||||
partial_array_state_manager()->reset();
|
||||
|
||||
for (uint i = 0; i < _max_num_tasks; ++i) {
|
||||
_tasks[i]->register_partial_array_splitter();
|
||||
}
|
||||
}
|
||||
|
||||
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
// To avoid fragmentation the full collection requesting to clear the bitmap
|
||||
@ -1789,17 +1830,18 @@ public:
|
||||
{ }
|
||||
|
||||
void operator()(G1TaskQueueEntry task_entry) const {
|
||||
if (task_entry.is_array_slice()) {
|
||||
guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
|
||||
if (task_entry.is_partial_array_state()) {
|
||||
oop obj = task_entry.to_partial_array_state()->source();
|
||||
guarantee(_g1h->is_in_reserved(obj), "Partial Array " PTR_FORMAT " must be in heap.", p2i(obj));
|
||||
return;
|
||||
}
|
||||
guarantee(oopDesc::is_oop(task_entry.obj()),
|
||||
guarantee(oopDesc::is_oop(task_entry.to_oop()),
|
||||
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
|
||||
p2i(task_entry.obj()), _phase, _info);
|
||||
G1HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
|
||||
p2i(task_entry.to_oop()), _phase, _info);
|
||||
G1HeapRegion* r = _g1h->heap_region_containing(task_entry.to_oop());
|
||||
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
|
||||
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
|
||||
p2i(task_entry.obj()), _phase, _info, r->hrm_index());
|
||||
p2i(task_entry.to_oop()), _phase, _info, r->hrm_index());
|
||||
}
|
||||
};
|
||||
|
||||
@ -2055,6 +2097,17 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
|
||||
_mark_stats_cache.reset();
|
||||
}
|
||||
|
||||
void G1CMTask::register_partial_array_splitter() {
|
||||
|
||||
::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(),
|
||||
_cm->max_num_tasks(),
|
||||
ObjArrayMarkingStride);
|
||||
}
|
||||
|
||||
void G1CMTask::unregister_partial_array_splitter() {
|
||||
_partial_array_splitter.~PartialArraySplitter();
|
||||
}
|
||||
|
||||
bool G1CMTask::should_exit_termination() {
|
||||
if (!regular_clock_call()) {
|
||||
return true;
|
||||
@ -2185,7 +2238,7 @@ bool G1CMTask::get_entries_from_global_stack() {
|
||||
if (task_entry.is_null()) {
|
||||
break;
|
||||
}
|
||||
assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
|
||||
assert(task_entry.is_partial_array_state() || oopDesc::is_oop(task_entry.to_oop()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.to_oop()));
|
||||
bool success = _task_queue->push(task_entry);
|
||||
// We only call this when the local queue is empty or under a
|
||||
// given target limit. So, we do not expect this push to fail.
|
||||
@ -2216,7 +2269,7 @@ void G1CMTask::drain_local_queue(bool partially) {
|
||||
G1TaskQueueEntry entry;
|
||||
bool ret = _task_queue->pop_local(entry);
|
||||
while (ret) {
|
||||
scan_task_entry(entry);
|
||||
process_entry(entry, false /* stolen */);
|
||||
if (_task_queue->size() <= target_size || has_aborted()) {
|
||||
ret = false;
|
||||
} else {
|
||||
@ -2226,6 +2279,37 @@ void G1CMTask::drain_local_queue(bool partially) {
|
||||
}
|
||||
}
|
||||
|
||||
size_t G1CMTask::start_partial_array_processing(oop obj) {
assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());

objArrayOop obj_array = objArrayOop(obj);
size_t array_length = obj_array->length();

size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);

// Mark objArray klass metadata
if (_cm_oop_closure->do_metadata()) {
_cm_oop_closure->do_klass(obj_array->klass());
}

process_array_chunk(obj_array, 0, initial_chunk_size);

// Include object header size
return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
}

size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
PartialArrayState* state = task.to_partial_array_state();
// Access state before release by claim().
objArrayOop obj = objArrayOop(state->source());

PartialArraySplitter::Claim claim =
_partial_array_splitter.claim(state, _task_queue, stolen);

process_array_chunk(obj, claim._start, claim._end);
return heap_word_size((claim._end - claim._start) * heapOopSize);
}

void G1CMTask::drain_global_stack(bool partially) {
if (has_aborted()) {
return;
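The two new functions above replace the old slice-address scheme: PartialArraySplitter hands out disjoint element ranges of a large objArray, the first claim is charged objArrayOopDesc::object_size(initial_chunk_size) so the header is counted exactly once, and each later claim adds heap_word_size((end - start) * heapOopSize). The sketch below shows the disjoint-claim idea in standalone C++ with an atomic cursor; the real splitter additionally pushes continuation tasks onto the task queues so idle workers can steal them, and every name here is illustrative rather than taken from the patch.

#include <atomic>
#include <cstddef>
#include <vector>

// Illustrative stand-in for ObjArrayMarkingStride: elements handed out per claim.
constexpr size_t kChunkStride = 1024;

// Shared per-array state: workers claim disjoint [start, end) element ranges.
struct ArraySplitState {
  const std::vector<void*>* array;   // stand-in for the objArrayOop being marked
  std::atomic<size_t> next_index{0}; // cursor for the next unclaimed element
};

struct Claim { size_t start; size_t end; };

// Claim the next chunk, or an empty range when the array is exhausted.
inline Claim claim_chunk(ArraySplitState& state) {
  size_t len = state.array->size();
  size_t start = state.next_index.fetch_add(kChunkStride, std::memory_order_relaxed);
  if (start >= len) {
    return {len, len};               // nothing left to do
  }
  size_t end = (start + kChunkStride < len) ? start + kChunkStride : len;
  return {start, end};
}

// Each worker loops: claim a range, then visit only those elements,
// mirroring process_array_chunk(obj, claim._start, claim._end).
template <typename Visitor>
void process_chunks(ArraySplitState& state, Visitor visit) {
  for (Claim c = claim_chunk(state); c.start < c.end; c = claim_chunk(state)) {
    for (size_t i = c.start; i < c.end; i++) {
      visit((*state.array)[i]);
    }
  }
}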
@ -2430,7 +2514,7 @@ void G1CMTask::attempt_stealing() {
|
||||
while (!has_aborted()) {
|
||||
G1TaskQueueEntry entry;
|
||||
if (_cm->try_stealing(_worker_id, entry)) {
|
||||
scan_task_entry(entry);
|
||||
process_entry(entry, true /* stolen */);
|
||||
|
||||
// And since we're towards the end, let's totally drain the
|
||||
// local queue and global stack.
|
||||
@ -2759,12 +2843,12 @@ G1CMTask::G1CMTask(uint worker_id,
|
||||
G1ConcurrentMark* cm,
|
||||
G1CMTaskQueue* task_queue,
|
||||
G1RegionMarkStats* mark_stats) :
|
||||
_objArray_processor(this),
|
||||
_worker_id(worker_id),
|
||||
_g1h(G1CollectedHeap::heap()),
|
||||
_cm(cm),
|
||||
_mark_bitmap(nullptr),
|
||||
_task_queue(task_queue),
|
||||
_partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks(), ObjArrayMarkingStride),
|
||||
_mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
|
||||
_calls(0),
|
||||
_time_target_ms(0.0),
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,17 +26,20 @@
|
||||
#define SHARE_GC_G1_G1CONCURRENTMARK_HPP
|
||||
|
||||
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
|
||||
#include "gc/g1/g1HeapRegionSet.hpp"
|
||||
#include "gc/g1/g1HeapVerifier.hpp"
|
||||
#include "gc/g1/g1RegionMarkStatsCache.hpp"
|
||||
#include "gc/shared/gcCause.hpp"
|
||||
#include "gc/shared/partialArraySplitter.hpp"
|
||||
#include "gc/shared/partialArrayState.hpp"
|
||||
#include "gc/shared/partialArrayTaskStats.hpp"
|
||||
#include "gc/shared/taskqueue.hpp"
|
||||
#include "gc/shared/taskTerminator.hpp"
|
||||
#include "gc/shared/verifyOption.hpp"
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "gc/shared/workerUtils.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/compilerWarnings.hpp"
|
||||
#include "utilities/numberSeq.hpp"
|
||||
|
||||
@ -53,41 +56,7 @@ class G1RegionToSpaceMapper;
|
||||
class G1SurvivorRegions;
|
||||
class ThreadClosure;
|
||||
|
||||
// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
class G1TaskQueueEntry {
private:
void* _holder;

static const uintptr_t ArraySliceBit = 1;

G1TaskQueueEntry(oop obj) : _holder(obj) {
assert(_holder != nullptr, "Not allowed to set null task queue element");
}
G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:

G1TaskQueueEntry() : _holder(nullptr) { }
// Trivially copyable, for use in GenericTaskQueue.

static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }

oop obj() const {
assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
return cast_to_oop(_holder);
}

HeapWord* slice() const {
assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
}

bool is_oop() const { return !is_array_slice(); }
bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
bool is_null() const { return _holder == nullptr; }
};

typedef ScannerTask G1TaskQueueEntry;
typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

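The removed class above tags array-slice addresses by setting the pointer's least significant bit, which is safe because oops and HeapWord addresses are word aligned; the typedef now reuses the shared ScannerTask, whose entries the patch uses to distinguish an oop from a PartialArrayState continuation. As a standalone illustration of that kind of low-bit-tagged handle (it mirrors the removed class, not the actual ScannerTask internals, and both payload types are placeholders):

#include <cassert>
#include <cstdint>

// Illustrative payload types; in HotSpot these would be oop and PartialArrayState*.
struct Object;
struct Continuation;

class TaggedTask {
  // Assumes both payload pointers are at least 2-byte aligned, so the low bit is free.
  static const std::uintptr_t ContinuationBit = 1;
  void* _holder;

public:
  TaggedTask() : _holder(nullptr) { }
  explicit TaggedTask(Object* obj) : _holder(obj) { }
  explicit TaggedTask(Continuation* c)
    : _holder(reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(c) | ContinuationBit)) { }

  bool is_null() const         { return _holder == nullptr; }
  bool is_continuation() const { return (reinterpret_cast<std::uintptr_t>(_holder) & ContinuationBit) != 0; }

  Object* to_object() const {
    assert(!is_continuation() && !is_null());
    return static_cast<Object*>(_holder);
  }
  Continuation* to_continuation() const {
    assert(is_continuation());
    return reinterpret_cast<Continuation*>(reinterpret_cast<std::uintptr_t>(_holder) & ~ContinuationBit);
  }
};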
@ -172,9 +141,9 @@ private:
|
||||
size_t _capacity;
|
||||
size_t _num_buckets;
|
||||
bool _should_grow;
|
||||
TaskQueueEntryChunk* volatile* _buckets;
|
||||
Atomic<TaskQueueEntryChunk*>* _buckets;
|
||||
char _pad0[DEFAULT_PADDING_SIZE];
|
||||
volatile size_t _size;
|
||||
Atomic<size_t> _size;
|
||||
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];
|
||||
|
||||
size_t bucket_size(size_t bucket) {
|
||||
@ -212,7 +181,7 @@ private:
|
||||
bool initialize(size_t initial_capacity, size_t max_capacity);
|
||||
|
||||
void reset() {
|
||||
_size = 0;
|
||||
_size.store_relaxed(0);
|
||||
_should_grow = false;
|
||||
}
|
||||
|
||||
@ -411,6 +380,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
G1CMTaskQueueSet* _task_queues; // Task queue set
|
||||
TaskTerminator _terminator; // For termination
|
||||
|
||||
PartialArrayStateManager* _partial_array_state_manager;
|
||||
|
||||
// Two sync barriers that are used to synchronize tasks when an
|
||||
// overflow occurs. The algorithm is the following. All tasks enter
|
||||
// the first one to ensure that they have all stopped manipulating
|
||||
@ -488,6 +459,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
// Prints all gathered CM-related statistics
|
||||
void print_stats();
|
||||
|
||||
void print_and_reset_taskqueue_stats();
|
||||
|
||||
HeapWord* finger() { return _finger; }
|
||||
bool concurrent() { return _concurrent; }
|
||||
uint active_tasks() { return _num_active_tasks; }
|
||||
@ -556,14 +529,14 @@ public:
|
||||
// mark_in_bitmap call. Updates various statistics data.
|
||||
void add_to_liveness(uint worker_id, oop const obj, size_t size);
|
||||
// Did the last marking find a live object between bottom and TAMS?
|
||||
bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
|
||||
bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
|
||||
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
|
||||
// live bytes between bottom and TAMS.
|
||||
size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
|
||||
size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
|
||||
// Set live bytes for concurrent marking.
|
||||
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
|
||||
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
|
||||
// Approximate number of incoming references found during marking.
|
||||
size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
|
||||
size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }
|
||||
|
||||
// Update the TAMS for the given region to the current top.
|
||||
inline void update_top_at_mark_start(G1HeapRegion* r);
|
||||
@ -582,6 +555,8 @@ public:
|
||||
|
||||
uint worker_id_offset() const { return _worker_id_offset; }
|
||||
|
||||
uint max_num_tasks() const {return _max_num_tasks; }
|
||||
|
||||
// Clear statistics gathered during the concurrent cycle for the given region after
|
||||
// it has been reclaimed.
|
||||
void clear_statistics(G1HeapRegion* r);
|
||||
@ -631,6 +606,8 @@ public:
|
||||
// Calculates the number of concurrent GC threads to be used in the marking phase.
|
||||
uint calc_active_marking_workers();
|
||||
|
||||
PartialArrayStateManager* partial_array_state_manager() const;
|
||||
|
||||
// Resets the global marking data structures, as well as the
|
||||
// task local ones; should be called during concurrent start.
|
||||
void reset();
|
||||
@ -642,6 +619,10 @@ public:
|
||||
// to be called concurrently to the mutator. It will yield to safepoint requests.
|
||||
void cleanup_for_next_mark();
|
||||
|
||||
// Recycle the memory that has been requested by allocators associated with
|
||||
// this manager.
|
||||
void reset_partial_array_state_manager();
|
||||
|
||||
// Clear the next marking bitmap during safepoint.
|
||||
void clear_bitmap(WorkerThreads* workers);
|
||||
|
||||
@ -732,14 +713,13 @@ private:
|
||||
refs_reached_period = 1024,
|
||||
};
|
||||
|
||||
G1CMObjArrayProcessor _objArray_processor;
|
||||
|
||||
uint _worker_id;
|
||||
G1CollectedHeap* _g1h;
|
||||
G1ConcurrentMark* _cm;
|
||||
G1CMBitMap* _mark_bitmap;
|
||||
// the task queue of this task
|
||||
G1CMTaskQueue* _task_queue;
|
||||
PartialArraySplitter _partial_array_splitter;
|
||||
|
||||
G1RegionMarkStatsCache _mark_stats_cache;
|
||||
// Number of calls to this task
|
||||
@ -850,13 +830,24 @@ private:
|
||||
// mark bitmap scan, and so needs to be pushed onto the mark stack.
|
||||
bool is_below_finger(oop obj, HeapWord* global_finger) const;
|
||||
|
||||
template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
|
||||
template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen);
|
||||
|
||||
static bool should_be_sliced(oop obj);
|
||||
// Start processing the given objArrayOop by first pushing its continuations and
|
||||
// then scanning the first chunk including the header.
|
||||
size_t start_partial_array_processing(oop obj);
|
||||
// Process the given continuation. Returns the number of words scanned.
|
||||
size_t process_partial_array(const G1TaskQueueEntry& task, bool stolen);
|
||||
// Apply the closure to the given range of elements in the objArray.
|
||||
inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
|
||||
public:
|
||||
// Apply the closure on the given area of the objArray. Return the number of words
|
||||
// scanned.
|
||||
inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
|
||||
// Resets the task; should be called right at the beginning of a marking phase.
|
||||
void reset(G1CMBitMap* mark_bitmap);
|
||||
// Register/unregister Partial Array Splitter Allocator with the PartialArrayStateManager.
|
||||
// This allows us to discard memory arenas used for partial object array states at the end
|
||||
// of a concurrent mark cycle.
|
||||
void register_partial_array_splitter();
|
||||
void unregister_partial_array_splitter();
|
||||
// Clears all the fields that correspond to a claimed region.
|
||||
void clear_region_fields();
|
||||
|
||||
@ -912,7 +903,7 @@ public:
|
||||
inline bool deal_with_reference(T* p);
|
||||
|
||||
// Scans an object and visits its children.
|
||||
inline void scan_task_entry(G1TaskQueueEntry task_entry);
|
||||
inline void process_entry(G1TaskQueueEntry task_entry, bool stolen);
|
||||
|
||||
// Pushes an object on the local queue.
|
||||
inline void push(G1TaskQueueEntry task_entry);
|
||||
@ -957,6 +948,11 @@ public:
|
||||
Pair<size_t, size_t> flush_mark_stats_cache();
|
||||
// Prints statistics associated with this task
|
||||
void print_stats();
|
||||
#if TASKQUEUE_STATS
|
||||
PartialArrayTaskStats* partial_array_task_stats() {
|
||||
return _partial_array_splitter.stats();
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Class that's used to to print out per-region liveness
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,7 +29,6 @@
|
||||
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
|
||||
#include "gc/g1/g1HeapRegion.hpp"
|
||||
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
|
||||
#include "gc/g1/g1OopClosures.inline.hpp"
|
||||
@ -39,6 +38,7 @@
|
||||
#include "gc/shared/suspendibleThreadSet.hpp"
|
||||
#include "gc/shared/taskqueue.inline.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
|
||||
inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
|
||||
// Check whether the passed in object is null. During discovery the referent
|
||||
@ -107,13 +107,15 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
|
||||
#endif
|
||||
|
||||
// It scans an object and visits its children.
|
||||
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }
|
||||
inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) {
|
||||
process_grey_task_entry<true>(task_entry, stolen);
|
||||
}
|
||||
|
||||
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
|
||||
assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
|
||||
assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
|
||||
_g1h->heap_region_containing(task_entry.obj())), "invariant");
|
||||
assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
|
||||
assert(task_entry.is_partial_array_state() || _g1h->is_in_reserved(task_entry.to_oop()), "invariant");
|
||||
assert(task_entry.is_partial_array_state() || !_g1h->is_on_master_free_list(
|
||||
_g1h->heap_region_containing(task_entry.to_oop())), "invariant");
|
||||
assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())), "invariant");
|
||||
|
||||
if (!_task_queue->push(task_entry)) {
|
||||
// The local task queue looks full. We need to push some entries
|
||||
@ -159,29 +161,34 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
|
||||
}
|
||||
|
||||
template<bool scan>
|
||||
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
|
||||
assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
|
||||
assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
|
||||
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) {
|
||||
assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray");
|
||||
assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())),
|
||||
"Any stolen object should be a slice or marked");
|
||||
|
||||
if (scan) {
|
||||
if (task_entry.is_array_slice()) {
|
||||
_words_scanned += _objArray_processor.process_slice(task_entry.slice());
|
||||
if (task_entry.is_partial_array_state()) {
|
||||
_words_scanned += process_partial_array(task_entry, stolen);
|
||||
} else {
|
||||
oop obj = task_entry.obj();
|
||||
if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
|
||||
_words_scanned += _objArray_processor.process_obj(obj);
|
||||
oop obj = task_entry.to_oop();
|
||||
if (should_be_sliced(obj)) {
|
||||
_words_scanned += start_partial_array_processing(obj);
|
||||
} else {
|
||||
_words_scanned += obj->oop_iterate_size(_cm_oop_closure);;
|
||||
_words_scanned += obj->oop_iterate_size(_cm_oop_closure);
|
||||
}
|
||||
}
|
||||
}
|
||||
check_limits();
|
||||
}
|
||||
|
||||
inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
obj->oop_iterate(_cm_oop_closure, mr);
return mr.word_size();
inline bool G1CMTask::should_be_sliced(oop obj) {
return obj->is_objArray() && ((objArrayOop)obj)->length() >= (int)ObjArrayMarkingStride;
}

inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
obj->oop_iterate_elements_range(_cm_oop_closure,
checked_cast<int>(start),
checked_cast<int>(end));
}

inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
|
||||
@ -265,7 +272,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
|
||||
// be pushed on the stack. So, some duplicate work, but no
|
||||
// correctness problems.
|
||||
if (is_below_finger(obj, global_finger)) {
|
||||
G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
|
||||
G1TaskQueueEntry entry(obj);
|
||||
if (obj->is_typeArray()) {
|
||||
// Immediately process arrays of primitive types, rather
|
||||
// than pushing on the mark stack. This keeps us from
|
||||
@ -277,7 +284,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
|
||||
// by only doing a bookkeeping update and avoiding the
|
||||
// actual scan of the object - a typeArray contains no
|
||||
// references, and the metadata is built-in.
|
||||
process_grey_task_entry<false>(entry);
|
||||
process_grey_task_entry<false>(entry, false /* stolen */);
|
||||
} else {
|
||||
push(entry);
|
||||
}
|
||||
|
||||
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMark.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
|
||||
#include "gc/g1/g1HeapRegion.inline.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
void G1CMObjArrayProcessor::push_array_slice(HeapWord* what) {
|
||||
_task->push(G1TaskQueueEntry::from_slice(what));
|
||||
}
|
||||
|
||||
size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
|
||||
size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
|
||||
|
||||
if (remaining > ObjArrayMarkingStride) {
|
||||
push_array_slice(start_from + ObjArrayMarkingStride);
|
||||
}
|
||||
|
||||
// Then process current area.
|
||||
MemRegion mr(start_from, words_to_scan);
|
||||
return _task->scan_objArray(obj, mr);
|
||||
}
|
||||
|
||||
size_t G1CMObjArrayProcessor::process_obj(oop obj) {
|
||||
assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
|
||||
|
||||
return process_array_slice(objArrayOop(obj), cast_from_oop<HeapWord*>(obj), objArrayOop(obj)->size());
|
||||
}
|
||||
|
||||
size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
|
||||
|
||||
// Find the start address of the objArrayOop.
|
||||
// Shortcut the BOT access if the given address is from a humongous object. The BOT
|
||||
// slide is fast enough for "smaller" objects in non-humongous regions, but is slower
|
||||
// than directly using heap region table.
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1HeapRegion* r = g1h->heap_region_containing(slice);
|
||||
|
||||
HeapWord* const start_address = r->is_humongous() ?
|
||||
r->humongous_start_region()->bottom() :
|
||||
r->block_start(slice);
|
||||
|
||||
assert(cast_to_oop(start_address)->is_objArray(), "Address " PTR_FORMAT " does not refer to an object array ", p2i(start_address));
|
||||
assert(start_address < slice,
|
||||
"Object start address " PTR_FORMAT " must be smaller than decoded address " PTR_FORMAT,
|
||||
p2i(start_address),
|
||||
p2i(slice));
|
||||
|
||||
objArrayOop objArray = objArrayOop(cast_to_oop(start_address));
|
||||
|
||||
size_t already_scanned = pointer_delta(slice, start_address);
|
||||
size_t remaining = objArray->size() - already_scanned;
|
||||
|
||||
return process_array_slice(objArray, slice, remaining);
|
||||
}
|
||||
@ -1,59 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
|
||||
#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
|
||||
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
|
||||
class G1CMTask;
|
||||
|
||||
// Helper class to mark through large objArrays during marking in an efficient way.
|
||||
// Instead of pushing large object arrays, we push continuations onto the
|
||||
// mark stack. These continuations are identified by having their LSB set.
|
||||
// This allows incremental processing of large objects.
|
||||
class G1CMObjArrayProcessor {
|
||||
private:
|
||||
// Reference to the task for doing the actual work.
|
||||
G1CMTask* _task;
|
||||
|
||||
// Push the continuation at the given address onto the mark stack.
|
||||
void push_array_slice(HeapWord* addr);
|
||||
|
||||
// Process (apply the closure) on the given continuation of the given objArray.
|
||||
size_t process_array_slice(objArrayOop const obj, HeapWord* start_from, size_t remaining);
|
||||
public:
|
||||
static bool should_be_sliced(oop obj);
|
||||
|
||||
G1CMObjArrayProcessor(G1CMTask* task) : _task(task) {
|
||||
}
|
||||
|
||||
// Process the given continuation. Returns the number of words scanned.
|
||||
size_t process_slice(HeapWord* slice);
|
||||
// Start processing the given objArrayOop by scanning the header and pushing its
|
||||
// continuation.
|
||||
size_t process_obj(oop obj);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,6 @@
|
||||
#include "gc/g1/g1HeapRegionPrinter.hpp"
|
||||
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
|
||||
@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
|
||||
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
|
||||
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
|
||||
|
||||
AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
|
||||
_total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);
|
||||
|
||||
// Update the old/humongous region sets
|
||||
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,6 +29,7 @@
|
||||
#include "gc/g1/g1HeapRegionManager.hpp"
|
||||
#include "gc/g1/g1HeapRegionSet.hpp"
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
|
||||
class G1CollectedHeap;
|
||||
class G1ConcurrentMark;
|
||||
@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
|
||||
G1ConcurrentMark* _cm;
|
||||
G1HeapRegionClaimer _hrclaimer;
|
||||
|
||||
uint volatile _total_selected_for_rebuild;
|
||||
Atomic<uint> _total_selected_for_rebuild;
|
||||
|
||||
// Reclaimed empty regions
|
||||
G1FreeRegionList _cleanup_list;
|
||||
@ -57,7 +58,9 @@ public:
|
||||
|
||||
void work(uint worker_id) override;
|
||||
|
||||
uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
|
||||
uint total_selected_for_rebuild() const {
|
||||
return _total_selected_for_rebuild.load_relaxed();
|
||||
}
|
||||
|
||||
static uint desired_num_workers(uint num_regions);
|
||||
};
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -116,8 +116,8 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
|
||||
_num_workers(calc_active_workers()),
|
||||
_has_compaction_targets(false),
|
||||
_has_humongous(false),
|
||||
_oop_queue_set(_num_workers),
|
||||
_array_queue_set(_num_workers),
|
||||
_marking_task_queues(_num_workers),
|
||||
_partial_array_state_manager(nullptr),
|
||||
_preserved_marks_set(true),
|
||||
_serial_compaction_point(this, nullptr),
|
||||
_humongous_compaction_point(this, nullptr),
|
||||
@ -140,23 +140,31 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
|
||||
_compaction_tops[j] = nullptr;
|
||||
}
|
||||
|
||||
_partial_array_state_manager = new PartialArrayStateManager(_num_workers);
|
||||
|
||||
for (uint i = 0; i < _num_workers; i++) {
|
||||
_markers[i] = new G1FullGCMarker(this, i, _live_stats);
|
||||
_compaction_points[i] = new G1FullGCCompactionPoint(this, _preserved_marks_set.get(i));
|
||||
_oop_queue_set.register_queue(i, marker(i)->oop_stack());
|
||||
_array_queue_set.register_queue(i, marker(i)->objarray_stack());
|
||||
_marking_task_queues.register_queue(i, marker(i)->task_queue());
|
||||
}
|
||||
|
||||
_serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
|
||||
_humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
|
||||
_region_attr_table.initialize(heap->reserved(), G1HeapRegion::GrainBytes);
|
||||
}
|
||||
|
||||
PartialArrayStateManager* G1FullCollector::partial_array_state_manager() const {
|
||||
return _partial_array_state_manager;
|
||||
}
|
||||
|
||||
G1FullCollector::~G1FullCollector() {
|
||||
for (uint i = 0; i < _num_workers; i++) {
|
||||
delete _markers[i];
|
||||
delete _compaction_points[i];
|
||||
}
|
||||
|
||||
delete _partial_array_state_manager;
|
||||
|
||||
FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
|
||||
FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
|
||||
FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
|
||||
@ -279,8 +287,8 @@ public:
|
||||
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
|
||||
G1FullKeepAliveClosure keep_alive(_collector.marker(index));
|
||||
BarrierEnqueueDiscoveredFieldClosure enqueue;
|
||||
G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
|
||||
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
|
||||
G1MarkStackClosure* complete_marking = _collector.marker(index)->stack_closure();
|
||||
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_marking);
|
||||
}
|
||||
};
|
||||
|
||||
@ -302,7 +310,7 @@ void G1FullCollector::phase1_mark_live_objects() {
|
||||
const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, _heap->workers(), pt);
|
||||
scope()->tracer()->report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");
|
||||
assert(marker(0)->task_queue()->is_empty(), "Should be no oops on the stack");
|
||||
}
|
||||
|
||||
{
|
||||
@ -328,8 +336,7 @@ void G1FullCollector::phase1_mark_live_objects() {
|
||||
scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
|
||||
}
|
||||
#if TASKQUEUE_STATS
|
||||
oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
|
||||
array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
|
||||
marking_task_queues()->print_and_reset_taskqueue_stats("Marking Task Queue");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -79,8 +79,8 @@ class G1FullCollector : StackObj {
|
||||
bool _has_humongous;
|
||||
G1FullGCMarker** _markers;
|
||||
G1FullGCCompactionPoint** _compaction_points;
|
||||
OopQueueSet _oop_queue_set;
|
||||
ObjArrayTaskQueueSet _array_queue_set;
|
||||
G1MarkTasksQueueSet _marking_task_queues;
|
||||
PartialArrayStateManager* _partial_array_state_manager;
|
||||
PreservedMarksSet _preserved_marks_set;
|
||||
G1FullGCCompactionPoint _serial_compaction_point;
|
||||
G1FullGCCompactionPoint _humongous_compaction_point;
|
||||
@ -113,8 +113,7 @@ public:
|
||||
uint workers() { return _num_workers; }
|
||||
G1FullGCMarker* marker(uint id) { return _markers[id]; }
|
||||
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
|
||||
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
|
||||
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
|
||||
G1MarkTasksQueueSet* marking_task_queues() { return &_marking_task_queues; }
|
||||
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
|
||||
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
|
||||
G1FullGCCompactionPoint* humongous_compaction_point() { return &_humongous_compaction_point; }
|
||||
@ -122,9 +121,11 @@ public:
|
||||
ReferenceProcessor* reference_processor();
|
||||
size_t live_words(uint region_index) const {
|
||||
assert(region_index < _heap->max_num_regions(), "sanity");
|
||||
return _live_stats[region_index]._live_words;
|
||||
return _live_stats[region_index].live_words();
|
||||
}
|
||||
|
||||
PartialArrayStateManager* partial_array_state_manager() const;
|
||||
|
||||
void before_marking_update_attribute_table(G1HeapRegion* hr);
|
||||
|
||||
inline bool is_compacting(oop obj) const;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,7 +34,7 @@
|
||||
G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
|
||||
G1FullGCTask("G1 Parallel Marking Task", collector),
|
||||
_root_processor(G1CollectedHeap::heap(), collector->workers()),
|
||||
_terminator(collector->workers(), collector->array_queue_set()) {
|
||||
_terminator(collector->workers(), collector->marking_task_queues()) {
|
||||
}
|
||||
|
||||
void G1FullGCMarkTask::work(uint worker_id) {
|
||||
@ -54,10 +54,9 @@ void G1FullGCMarkTask::work(uint worker_id) {
|
||||
}
|
||||
|
||||
// Mark stack is populated, now process and drain it.
|
||||
marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
|
||||
marker->complete_marking(collector()->marking_task_queues(), &_terminator);
|
||||
|
||||
// This is the point where the entire marking should have completed.
|
||||
assert(marker->oop_stack()->is_empty(), "Marking should have completed");
|
||||
assert(marker->objarray_stack()->is_empty(), "Array marking should have completed");
|
||||
assert(marker->task_queue()->is_empty(), "Marking should have completed");
|
||||
log_task("Marking task", worker_id, start);
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +25,8 @@
|
||||
#include "classfile/classLoaderData.hpp"
|
||||
#include "classfile/classLoaderDataGraph.hpp"
|
||||
#include "gc/g1/g1FullGCMarker.inline.hpp"
|
||||
#include "gc/shared/partialArraySplitter.inline.hpp"
|
||||
#include "gc/shared/partialArrayState.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
#include "gc/shared/taskTerminator.hpp"
|
||||
#include "gc/shared/verifyOption.hpp"
|
||||
@ -36,8 +38,8 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
|
||||
_collector(collector),
|
||||
_worker_id(worker_id),
|
||||
_bitmap(collector->mark_bitmap()),
|
||||
_oop_stack(),
|
||||
_objarray_stack(),
|
||||
_task_queue(),
|
||||
_partial_array_splitter(collector->partial_array_state_manager(), collector->workers(), ObjArrayMarkingStride),
|
||||
_mark_closure(worker_id, this, ClassLoaderData::_claim_stw_fullgc_mark, G1CollectedHeap::heap()->ref_processor_stw()),
|
||||
_stack_closure(this),
|
||||
_cld_closure(mark_closure(), ClassLoaderData::_claim_stw_fullgc_mark),
|
||||
@ -47,24 +49,36 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
}

G1FullGCMarker::~G1FullGCMarker() {
assert(is_empty(), "Must be empty at this point");
assert(is_task_queue_empty(), "Must be empty at this point");
}

void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
ObjArrayTaskQueueSet* array_stacks,
void G1FullGCMarker::process_partial_array(PartialArrayState* state, bool stolen) {
// Access state before release by claim().
objArrayOop obj_array = objArrayOop(state->source());
PartialArraySplitter::Claim claim =
_partial_array_splitter.claim(state, task_queue(), stolen);
process_array_chunk(obj_array, claim._start, claim._end);
}

void G1FullGCMarker::start_partial_array_processing(objArrayOop obj) {
mark_closure()->do_klass(obj->klass());
// Don't push empty arrays to avoid unnecessary work.
size_t array_length = obj->length();
if (array_length > 0) {
size_t initial_chunk_size = _partial_array_splitter.start(task_queue(), obj, nullptr, array_length);
process_array_chunk(obj, 0, initial_chunk_size);
}
}

void G1FullGCMarker::complete_marking(G1ScannerTasksQueueSet* task_queues,
TaskTerminator* terminator) {
do {
follow_marking_stacks();
ObjArrayTask steal_array;
if (array_stacks->steal(_worker_id, steal_array)) {
follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index());
} else {
oop steal_oop;
if (oop_stacks->steal(_worker_id, steal_oop)) {
follow_object(steal_oop);
}
process_marking_stacks();
ScannerTask stolen_task;
if (task_queues->steal(_worker_id, stolen_task)) {
dispatch_task(stolen_task, true);
}
} while (!is_empty() || !terminator->offer_termination());
} while (!is_task_queue_empty() || !terminator->offer_termination());
}

void G1FullGCMarker::flush_mark_stats_cache() {

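complete_marking above now drains one ScannerTask queue per worker and interleaves stealing with termination offers, instead of juggling separate oop and objArray queues. The sketch below captures that drain, steal, and terminate shape against hypothetical queue and terminator interfaces; the real GenericTaskQueueSet::steal and TaskTerminator::offer_termination do more than shown here (victim selection, spinning), so treat this as a sketch of the loop structure only.

// Hypothetical minimal interfaces, standing in for GenericTaskQueueSet and TaskTerminator.
template <typename Task>
struct WorkerQueues {
  virtual bool pop_local(unsigned worker_id, Task& t) = 0;      // this worker's own queue
  virtual bool has_local_work(unsigned worker_id) const = 0;
  virtual bool steal(unsigned worker_id, Task& t) = 0;          // take from another worker
  virtual ~WorkerQueues() = default;
};

struct Terminator {
  // Returns true once all workers agree that no work is left.
  virtual bool offer_termination() = 0;
  virtual ~Terminator() = default;
};

// Shape of the marking loop: drain local work, try one steal, and only offer
// termination when the local queue is empty; a successful steal may create
// new local work, which the next iteration drains.
template <typename Task, typename ProcessFn>
void complete_work(unsigned worker_id,
                   WorkerQueues<Task>& queues,
                   Terminator& terminator,
                   ProcessFn process) {
  do {
    Task t;
    while (queues.pop_local(worker_id, t)) {
      process(t, false /* stolen */);
    }
    if (queues.steal(worker_id, t)) {
      process(t, true /* stolen */);
    }
  } while (queues.has_local_work(worker_id) || !terminator.offer_termination());
}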
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,8 @@
|
||||
#include "gc/g1/g1FullGCOopClosures.hpp"
|
||||
#include "gc/g1/g1OopClosures.hpp"
|
||||
#include "gc/g1/g1RegionMarkStatsCache.hpp"
|
||||
#include "gc/shared/partialArraySplitter.hpp"
|
||||
#include "gc/shared/partialArrayState.hpp"
|
||||
#include "gc/shared/stringdedup/stringDedup.hpp"
|
||||
#include "gc/shared/taskqueue.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
@ -38,16 +40,15 @@
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/stack.hpp"
|
||||
|
||||
typedef OverflowTaskQueue<oop, mtGC> OopQueue;
|
||||
typedef OverflowTaskQueue<ObjArrayTask, mtGC> ObjArrayTaskQueue;
|
||||
|
||||
typedef GenericTaskQueueSet<OopQueue, mtGC> OopQueueSet;
|
||||
typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
|
||||
|
||||
class G1CMBitMap;
|
||||
class G1FullCollector;
|
||||
class TaskTerminator;
|
||||
|
||||
typedef OverflowTaskQueue<ScannerTask, mtGC> G1MarkTasksQueue;
|
||||
typedef GenericTaskQueueSet<G1MarkTasksQueue, mtGC> G1MarkTasksQueueSet;
|
||||
|
||||
class G1FullGCMarker : public CHeapObj<mtGC> {
|
||||
G1FullCollector* _collector;
|
||||
|
||||
@ -56,56 +57,50 @@ class G1FullGCMarker : public CHeapObj<mtGC> {
|
||||
G1CMBitMap* _bitmap;
|
||||
|
||||
// Mark stack
|
||||
OopQueue _oop_stack;
|
||||
ObjArrayTaskQueue _objarray_stack;
|
||||
G1MarkTasksQueue _task_queue;
|
||||
PartialArraySplitter _partial_array_splitter;
|
||||
|
||||
// Marking closures
|
||||
G1MarkAndPushClosure _mark_closure;
|
||||
G1FollowStackClosure _stack_closure;
|
||||
G1MarkStackClosure _stack_closure;
|
||||
CLDToOopClosure _cld_closure;
|
||||
StringDedup::Requests _string_dedup_requests;
|
||||
|
||||
|
||||
G1RegionMarkStatsCache _mark_stats_cache;
|
||||
|
||||
inline bool is_empty();
|
||||
inline void push_objarray(oop obj, size_t index);
|
||||
inline bool is_task_queue_empty();
|
||||
inline bool mark_object(oop obj);
|
||||
|
||||
// Marking helpers
|
||||
inline void follow_object(oop obj);
|
||||
inline void follow_array(objArrayOop array);
|
||||
inline void follow_array_chunk(objArrayOop array, int index);
|
||||
inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
|
||||
inline void dispatch_task(const ScannerTask& task, bool stolen);
|
||||
// Start processing the given objArrayOop by first pushing its continuations and
|
||||
// then scanning the first chunk.
|
||||
void start_partial_array_processing(objArrayOop obj);
|
||||
// Process the given continuation.
|
||||
void process_partial_array(PartialArrayState* state, bool stolen);
|
||||
|
||||
inline void publish_and_drain_oop_tasks();
|
||||
// Try to publish all contents from the objArray task queue overflow stack to
|
||||
// the shared objArray stack.
|
||||
// Returns true and a valid task if there has not been enough space in the shared
|
||||
// objArray stack, otherwise returns false and the task is invalid.
|
||||
inline bool publish_or_pop_objarray_tasks(ObjArrayTask& task);
|
||||
|
||||
public:
|
||||
G1FullGCMarker(G1FullCollector* collector,
|
||||
uint worker_id,
|
||||
G1RegionMarkStats* mark_stats);
|
||||
~G1FullGCMarker();
|
||||
|
||||
// Stack getters
|
||||
OopQueue* oop_stack() { return &_oop_stack; }
|
||||
ObjArrayTaskQueue* objarray_stack() { return &_objarray_stack; }
|
||||
G1MarkTasksQueue* task_queue() { return &_task_queue; }
|
||||
|
||||
// Marking entry points
|
||||
template <class T> inline void mark_and_push(T* p);
|
||||
|
||||
inline void follow_marking_stacks();
|
||||
void complete_marking(OopQueueSet* oop_stacks,
|
||||
ObjArrayTaskQueueSet* array_stacks,
|
||||
inline void process_marking_stacks();
|
||||
void complete_marking(G1MarkTasksQueueSet* task_queues,
|
||||
TaskTerminator* terminator);
|
||||
|
||||
// Closure getters
|
||||
CLDToOopClosure* cld_closure() { return &_cld_closure; }
|
||||
G1MarkAndPushClosure* mark_closure() { return &_mark_closure; }
|
||||
G1FollowStackClosure* stack_closure() { return &_stack_closure; }
|
||||
G1MarkStackClosure* stack_closure() { return &_stack_closure; }
|
||||
|
||||
// Flush live bytes to regions
|
||||
void flush_mark_stats_cache();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,6 +42,7 @@
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline bool G1FullGCMarker::mark_object(oop obj) {
|
||||
@ -71,94 +72,55 @@ template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
|
||||
if (!CompressedOops::is_null(heap_oop)) {
|
||||
oop obj = CompressedOops::decode_not_null(heap_oop);
|
||||
if (mark_object(obj)) {
|
||||
_oop_stack.push(obj);
|
||||
_task_queue.push(ScannerTask(obj));
|
||||
}
|
||||
assert(_bitmap->is_marked(obj), "Must be marked");
|
||||
}
|
||||
}
|
||||
|
||||
inline bool G1FullGCMarker::is_empty() {
|
||||
return _oop_stack.is_empty() && _objarray_stack.is_empty();
|
||||
inline bool G1FullGCMarker::is_task_queue_empty() {
|
||||
return _task_queue.is_empty();
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::push_objarray(oop obj, size_t index) {
|
||||
ObjArrayTask task(obj, index);
|
||||
assert(task.is_valid(), "bad ObjArrayTask");
|
||||
_objarray_stack.push(task);
|
||||
inline void G1FullGCMarker::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
|
||||
obj->oop_iterate_elements_range(mark_closure(),
|
||||
checked_cast<int>(start),
|
||||
checked_cast<int>(end));
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::follow_array(objArrayOop array) {
|
||||
mark_closure()->do_klass(array->klass());
|
||||
// Don't push empty arrays to avoid unnecessary work.
|
||||
if (array->length() > 0) {
|
||||
push_objarray(array, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
|
||||
const int len = array->length();
|
||||
const int beg_index = index;
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
|
||||
const int end_index = beg_index + stride;
|
||||
|
||||
// Push the continuation first to allow more efficient work stealing.
|
||||
if (end_index < len) {
|
||||
push_objarray(array, end_index);
|
||||
}
|
||||
|
||||
array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::follow_object(oop obj) {
|
||||
assert(_bitmap->is_marked(obj), "should be marked");
|
||||
if (obj->is_objArray()) {
|
||||
// Handle object arrays explicitly to allow them to
|
||||
// be split into chunks if needed.
|
||||
follow_array((objArrayOop)obj);
|
||||
inline void G1FullGCMarker::dispatch_task(const ScannerTask& task, bool stolen) {
|
||||
if (task.is_partial_array_state()) {
|
||||
assert(_bitmap->is_marked(task.to_partial_array_state()->source()), "should be marked");
|
||||
process_partial_array(task.to_partial_array_state(), stolen);
|
||||
} else {
|
||||
obj->oop_iterate(mark_closure());
|
||||
oop obj = task.to_oop();
|
||||
assert(_bitmap->is_marked(obj), "should be marked");
|
||||
if (obj->is_objArray()) {
|
||||
// Handle object arrays explicitly to allow them to
|
||||
// be split into chunks if needed.
|
||||
start_partial_array_processing((objArrayOop)obj);
|
||||
} else {
|
||||
obj->oop_iterate(mark_closure());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::publish_and_drain_oop_tasks() {
|
||||
oop obj;
|
||||
while (_oop_stack.pop_overflow(obj)) {
|
||||
if (!_oop_stack.try_push_to_taskqueue(obj)) {
|
||||
assert(_bitmap->is_marked(obj), "must be marked");
|
||||
follow_object(obj);
|
||||
ScannerTask task;
|
||||
while (_task_queue.pop_overflow(task)) {
|
||||
if (!_task_queue.try_push_to_taskqueue(task)) {
|
||||
dispatch_task(task, false);
|
||||
}
|
||||
}
|
||||
while (_oop_stack.pop_local(obj)) {
|
||||
assert(_bitmap->is_marked(obj), "must be marked");
|
||||
follow_object(obj);
|
||||
while (_task_queue.pop_local(task)) {
|
||||
dispatch_task(task, false);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool G1FullGCMarker::publish_or_pop_objarray_tasks(ObjArrayTask& task) {
|
||||
// It is desirable to move as much as possible work from the overflow queue to
|
||||
// the shared queue as quickly as possible.
|
||||
while (_objarray_stack.pop_overflow(task)) {
|
||||
if (!_objarray_stack.try_push_to_taskqueue(task)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void G1FullGCMarker::follow_marking_stacks() {
|
||||
void G1FullGCMarker::process_marking_stacks() {
|
||||
do {
|
||||
// First, drain regular oop stack.
|
||||
publish_and_drain_oop_tasks();
|
||||
|
||||
// Then process ObjArrays one at a time to avoid marking stack bloat.
|
||||
ObjArrayTask task;
|
||||
if (publish_or_pop_objarray_tasks(task) ||
|
||||
_objarray_stack.pop_local(task)) {
|
||||
follow_array_chunk(objArrayOop(task.obj()), task.index());
|
||||
}
|
||||
} while (!is_empty());
|
||||
} while (!is_task_queue_empty());
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
|
||||
|
||||
@ -35,7 +35,7 @@
|
||||
G1IsAliveClosure::G1IsAliveClosure(G1FullCollector* collector) :
|
||||
G1IsAliveClosure(collector, collector->mark_bitmap()) { }
|
||||
|
||||
void G1FollowStackClosure::do_void() { _marker->follow_marking_stacks(); }
|
||||
void G1MarkStackClosure::do_void() { _marker->process_marking_stacks(); }
|
||||
|
||||
void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
|
||||
void G1FullKeepAliveClosure::do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
|
||||
@ -86,11 +86,11 @@ public:
|
||||
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
|
||||
};
|
||||
|
||||
class G1FollowStackClosure: public VoidClosure {
|
||||
class G1MarkStackClosure: public VoidClosure {
|
||||
G1FullGCMarker* _marker;
|
||||
|
||||
public:
|
||||
G1FollowStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
|
||||
G1MarkStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,7 +24,6 @@
|
||||
|
||||
|
||||
#include "gc/g1/g1ParallelCleaning.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#if INCLUDE_JVMCI
|
||||
#include "jvmci/jvmci.hpp"
|
||||
#endif
|
||||
@ -35,11 +34,11 @@ JVMCICleaningTask::JVMCICleaningTask() :
}

bool JVMCICleaningTask::claim_cleaning_task() {
if (AtomicAccess::load(&_cleaning_claimed)) {
if (_cleaning_claimed.load_relaxed()) {
return false;
}

return !AtomicAccess::cmpxchg(&_cleaning_claimed, false, true);
return _cleaning_claimed.compare_set(false, true);
}

void JVMCICleaningTask::work(bool unloading_occurred) {

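claim_cleaning_task above is a claim-once flag: a cheap relaxed read filters the common already-claimed case, and a single compare-and-set picks the winner (compare_set(false, true) appears to report whether this thread performed the exchange, matching the old !cmpxchg(...) result). The same pattern in portable C++, with illustrative names:

#include <atomic>

class OneShotClaim {
  std::atomic<bool> _claimed{false};

public:
  // Returns true for exactly one caller; everyone else gets false.
  bool try_claim() {
    // Cheap early-out: if someone already claimed, skip the atomic RMW.
    if (_claimed.load(std::memory_order_relaxed)) {
      return false;
    }
    bool expected = false;
    // compare_exchange_strong succeeds only for the first thread to flip the flag.
    return _claimed.compare_exchange_strong(expected, true);
  }
};

// Usage: if (claim.try_claim()) { /* perform the one-time work */ }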
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,10 +26,13 @@
|
||||
#define SHARE_GC_G1_G1PARALLELCLEANING_HPP
|
||||
|
||||
#include "gc/shared/parallelCleaning.hpp"
|
||||
#if INCLUDE_JVMCI
|
||||
#include "runtime/atomic.hpp"
|
||||
#endif
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
class JVMCICleaningTask : public StackObj {
|
||||
volatile bool _cleaning_claimed;
|
||||
Atomic<bool> _cleaning_claimed;
|
||||
|
||||
public:
|
||||
JVMCICleaningTask();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -203,8 +203,8 @@ void G1Policy::update_young_length_bounds(size_t pending_cards, size_t card_rs_l
|
||||
// allocation.
|
||||
// That is "fine" - at most this will schedule a GC (hopefully only a little) too
|
||||
// early or too late.
|
||||
AtomicAccess::store(&_young_list_desired_length, new_young_list_desired_length);
|
||||
AtomicAccess::store(&_young_list_target_length, new_young_list_target_length);
|
||||
_young_list_desired_length.store_relaxed(new_young_list_desired_length);
|
||||
_young_list_target_length.store_relaxed(new_young_list_target_length);
|
||||
}
|
||||
|
||||
// Calculates desired young gen length. It is calculated from:
|
||||
@ -943,7 +943,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
|
||||
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSToYoungGenCards));
|
||||
}
|
||||
|
||||
record_pause(this_pause, start_time_sec, end_time_sec, allocation_failure);
|
||||
record_pause(this_pause, start_time_sec, end_time_sec);
|
||||
|
||||
if (G1GCPauseTypeHelper::is_last_young_pause(this_pause)) {
|
||||
assert(!G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause),
|
||||
@ -1389,16 +1389,13 @@ void G1Policy::update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_t
|
||||
|
||||
void G1Policy::record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure) {
|
||||
double end) {
|
||||
// Manage the MMU tracker. For some reason it ignores Full GCs.
|
||||
if (gc_type != G1GCPauseType::FullGC) {
|
||||
_mmu_tracker->add_pause(start, end);
|
||||
}
|
||||
|
||||
if (!allocation_failure) {
|
||||
update_gc_pause_time_ratios(gc_type, start, end);
|
||||
}
|
||||
update_gc_pause_time_ratios(gc_type, start, end);
|
||||
|
||||
update_time_to_mixed_tracking(gc_type, start, end);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -35,7 +35,7 @@
|
||||
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
|
||||
#include "gc/g1/g1YoungGenSizer.hpp"
|
||||
#include "gc/shared/gcCause.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
|
||||
@ -81,12 +81,9 @@ class G1Policy: public CHeapObj<mtGC> {
|
||||
|
||||
// Desired young gen length without taking actually available free regions into
|
||||
// account.
|
||||
volatile uint _young_list_desired_length;
|
||||
Atomic<uint> _young_list_desired_length;
|
||||
// Actual target length given available free memory.
|
||||
volatile uint _young_list_target_length;
|
||||
// The max number of regions we can extend the eden by while the GC
|
||||
// locker is active. This should be >= _young_list_target_length;
|
||||
volatile uint _young_list_max_length;
|
||||
Atomic<uint> _young_list_target_length;
|
||||
|
||||
// The survivor rate groups below must be initialized after the predictor because they
|
||||
// indirectly use it through the "this" object passed to their constructor.
|
||||
@ -275,8 +272,7 @@ private:
|
||||
// Record the given STW pause with the given start and end times (in s).
|
||||
void record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure = false);
|
||||
double end);
|
||||
|
||||
void update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_sec, double end_sec);
|
||||
|
||||
@ -363,8 +359,8 @@ public:
|
||||
// This must be called at the very beginning of an evacuation pause.
|
||||
void decide_on_concurrent_start_pause();
|
||||
|
||||
uint young_list_desired_length() const { return AtomicAccess::load(&_young_list_desired_length); }
|
||||
uint young_list_target_length() const { return AtomicAccess::load(&_young_list_target_length); }
|
||||
uint young_list_desired_length() const { return _young_list_desired_length.load_relaxed(); }
|
||||
uint young_list_target_length() const { return _young_list_target_length.load_relaxed(); }
|
||||
|
||||
bool should_allocate_mutator_region() const;
|
||||
bool should_expand_on_mutator_allocation() const;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
@ -40,20 +41,23 @@
// * the number of incoming references found during marking. This is an approximate
// value because we do not mark through all objects.
struct G1RegionMarkStats {
size_t _live_words;
size_t _incoming_refs;
Atomic<size_t> _live_words;
Atomic<size_t> _incoming_refs;

// Clear all members.
void clear() {
_live_words = 0;
_incoming_refs = 0;
_live_words.store_relaxed(0);
_incoming_refs.store_relaxed(0);
}
// Clear all members after a marking overflow. Only needs to clear the number of
// incoming references as all objects will be rescanned, while the live words are
// gathered whenever a thread can mark an object, which is synchronized.
void clear_during_overflow() {
_incoming_refs = 0;
_incoming_refs.store_relaxed(0);
}

size_t live_words() const { return _live_words.load_relaxed(); }
size_t incoming_refs() const { return _incoming_refs.load_relaxed(); }
};

// Per-marking thread cache for the region mark statistics.
@ -112,12 +116,16 @@ public:
|
||||
void add_live_words(oop obj);
|
||||
void add_live_words(uint region_idx, size_t live_words) {
|
||||
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
|
||||
cur->_stats._live_words += live_words;
|
||||
// This method is only ever called single-threaded, so we do not need atomic
|
||||
// update here.
|
||||
cur->_stats._live_words.store_relaxed(cur->_stats.live_words() + live_words);
|
||||
}
|
||||
|
||||
void inc_incoming_refs(uint region_idx) {
|
||||
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
|
||||
cur->_stats._incoming_refs++;
|
||||
// This method is only ever called single-threaded, so we do not need atomic
|
||||
// update here.
|
||||
cur->_stats._incoming_refs.store_relaxed(cur->_stats.incoming_refs() + 1u);
|
||||
}
|
||||
|
||||
void reset(uint region_idx) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,8 +27,6 @@
|
||||
|
||||
#include "gc/g1/g1RegionMarkStatsCache.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
|
||||
inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
|
||||
uint const cache_idx = hash(region_idx);
|
||||
|
||||
@ -46,12 +44,12 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac

inline void G1RegionMarkStatsCache::evict(uint idx) {
G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
if (cur->_stats._live_words != 0) {
AtomicAccess::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
if (cur->_stats.live_words() != 0) {
_target[cur->_region_idx]._live_words.add_then_fetch(cur->_stats.live_words());
}

if (cur->_stats._incoming_refs != 0) {
AtomicAccess::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs);
if (cur->_stats.incoming_refs() != 0) {
_target[cur->_region_idx]._incoming_refs.add_then_fetch(cur->_stats.incoming_refs());
}

cur->clear();
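A simplified, standalone illustration of the cache-and-evict pattern above: each worker accumulates per-region counts in a plain, thread-local cache, and only the flush into the shared per-region table is atomic. std::atomic<size_t>::fetch_add stands in for Atomic<size_t>::add_then_fetch, and the 4-entry cache is an arbitrary size chosen for the sketch.

#include <atomic>
#include <cstddef>
#include <vector>

struct LocalEntry {
  size_t _region_idx = 0;
  size_t _live_words = 0;        // updated by one thread only, so no atomics needed
};

class RegionStatsCache {
  static constexpr size_t CacheSize = 4;
  LocalEntry _cache[CacheSize];
  std::vector<std::atomic<size_t>>& _target;   // shared table, one slot per region

  static size_t hash(size_t region_idx) { return region_idx % CacheSize; }

public:
  explicit RegionStatsCache(std::vector<std::atomic<size_t>>& target) : _target(target) {}

  void add_live_words(size_t region_idx, size_t words) {
    LocalEntry& e = _cache[hash(region_idx)];
    if (e._region_idx != region_idx) {
      evict(hash(region_idx));   // flush the previous occupant before reusing the slot
      e._region_idx = region_idx;
    }
    e._live_words += words;
  }

  void evict(size_t idx) {
    LocalEntry& e = _cache[idx];
    if (e._live_words != 0) {
      // The only cross-thread interaction: one relaxed atomic add into the shared table.
      _target[e._region_idx].fetch_add(e._live_words, std::memory_order_relaxed);
      e._live_words = 0;
    }
  }

  void flush_all() {
    for (size_t i = 0; i < CacheSize; i++) {
      evict(i);
    }
  }
};

int main() {
  std::vector<std::atomic<size_t>> live_words_per_region(16);
  RegionStatsCache cache(live_words_per_region);
  cache.add_live_words(3, 128);
  cache.add_live_words(3, 64);
  cache.flush_all();
  return live_words_per_region[3].load() == 192 ? 0 : 1;
}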
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -58,6 +58,7 @@
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/threads.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
|
||||
@ -459,8 +460,8 @@ class G1PrepareEvacuationTask : public WorkerTask {
|
||||
|
||||
G1CollectedHeap* _g1h;
|
||||
G1HeapRegionClaimer _claimer;
|
||||
volatile uint _humongous_total;
|
||||
volatile uint _humongous_candidates;
|
||||
Atomic<uint> _humongous_total;
|
||||
Atomic<uint> _humongous_candidates;
|
||||
|
||||
G1MonotonicArenaMemoryStats _all_card_set_stats;
|
||||
|
||||
@ -481,19 +482,19 @@ public:
|
||||
}
|
||||
|
||||
void add_humongous_candidates(uint candidates) {
|
||||
AtomicAccess::add(&_humongous_candidates, candidates);
|
||||
_humongous_candidates.add_then_fetch(candidates);
|
||||
}
|
||||
|
||||
void add_humongous_total(uint total) {
|
||||
AtomicAccess::add(&_humongous_total, total);
|
||||
_humongous_total.add_then_fetch(total);
|
||||
}
|
||||
|
||||
uint humongous_candidates() {
|
||||
return _humongous_candidates;
|
||||
return _humongous_candidates.load_relaxed();
|
||||
}
|
||||
|
||||
uint humongous_total() {
|
||||
return _humongous_total;
|
||||
return _humongous_total.load_relaxed();
|
||||
}
|
||||
|
||||
const G1MonotonicArenaMemoryStats all_card_set_stats() const {
|
||||
@ -698,7 +699,7 @@ protected:
|
||||
virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
|
||||
|
||||
private:
|
||||
volatile bool _pinned_regions_recorded;
|
||||
Atomic<bool> _pinned_regions_recorded;
|
||||
|
||||
public:
|
||||
G1EvacuateRegionsBaseTask(const char* name,
|
||||
@ -722,7 +723,7 @@ public:
|
||||
G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
|
||||
pss->set_ref_discoverer(_g1h->ref_processor_stw());
|
||||
|
||||
if (!AtomicAccess::cmpxchg(&_pinned_regions_recorded, false, true)) {
|
||||
if (_pinned_regions_recorded.compare_set(false, true)) {
|
||||
record_pinned_regions(pss, worker_id);
|
||||
}
|
||||
scan_roots(pss, worker_id);
|
||||
|
||||
@ -46,6 +46,7 @@
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "runtime/threads.hpp"
|
||||
#include "runtime/threadSMR.hpp"
|
||||
@ -759,7 +760,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1
|
||||
const size_t* _surviving_young_words;
|
||||
uint _active_workers;
|
||||
G1EvacFailureRegions* _evac_failure_regions;
|
||||
volatile uint _num_retained_regions;
|
||||
Atomic<uint> _num_retained_regions;
|
||||
|
||||
FreeCSetStats* worker_stats(uint worker) {
|
||||
return &_worker_stats[worker];
|
||||
@ -794,7 +795,7 @@ public:
|
||||
virtual ~FreeCollectionSetTask() {
|
||||
Ticks serial_time = Ticks::now();
|
||||
|
||||
bool has_new_retained_regions = AtomicAccess::load(&_num_retained_regions) != 0;
|
||||
bool has_new_retained_regions = _num_retained_regions.load_relaxed() != 0;
|
||||
if (has_new_retained_regions) {
|
||||
G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
|
||||
candidates->sort_by_efficiency();
|
||||
@ -829,7 +830,7 @@ public:
|
||||
// Report per-region type timings.
|
||||
cl.report_timing();
|
||||
|
||||
AtomicAccess::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed);
|
||||
_num_retained_regions.add_then_fetch(cl.num_retained_regions(), memory_order_relaxed);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -44,6 +44,7 @@
|
||||
#include "gc/parallel/psStringDedup.hpp"
|
||||
#include "gc/parallel/psYoungGen.hpp"
|
||||
#include "gc/shared/classUnloadingContext.hpp"
|
||||
#include "gc/shared/collectedHeap.inline.hpp"
|
||||
#include "gc/shared/fullGCForwarding.inline.hpp"
|
||||
#include "gc/shared/gcCause.hpp"
|
||||
#include "gc/shared/gcHeapSummary.hpp"
|
||||
@ -932,6 +933,17 @@ void PSParallelCompact::summary_phase(bool should_do_max_compaction)
|
||||
}
|
||||
}
|
||||
|
||||
void PSParallelCompact::report_object_count_after_gc() {
GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
// The heap is compacted, all objects are iterable. However there may be
// filler objects in the heap which we should ignore.
class SkipFillerObjectClosure : public BoolObjectClosure {
public:
bool do_object_b(oop obj) override { return !CollectedHeap::is_filler_object(obj); }
} cl;
_gc_tracer.report_object_count_after_gc(&cl, &ParallelScavengeHeap::heap()->workers());
}
bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
|
||||
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
|
||||
@ -1027,6 +1039,8 @@ bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_comp
|
||||
|
||||
heap->print_heap_change(pre_gc_values);
|
||||
|
||||
report_object_count_after_gc();
|
||||
|
||||
// Track memory usage and detect low memory
|
||||
MemoryService::track_memory_usage();
|
||||
heap->update_counters();
|
||||
@ -1274,10 +1288,6 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
|
||||
_gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
|
||||
}
|
||||
#if TASKQUEUE_STATS
|
||||
ParCompactionManager::print_and_reset_taskqueue_stats();
|
||||
#endif
|
||||
@ -1835,8 +1845,7 @@ void PSParallelCompact::verify_filler_in_dense_prefix() {
|
||||
oop obj = cast_to_oop(cur_addr);
|
||||
oopDesc::verify(obj);
|
||||
if (!mark_bitmap()->is_marked(cur_addr)) {
|
||||
Klass* k = cast_to_oop(cur_addr)->klass();
|
||||
assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
|
||||
assert(CollectedHeap::is_filler_object(cast_to_oop(cur_addr)), "inv");
|
||||
}
|
||||
cur_addr += obj->size();
|
||||
}
|
||||
|
||||
@ -749,6 +749,7 @@ private:
|
||||
// Move objects to new locations.
|
||||
static void compact();
|
||||
|
||||
static void report_object_count_after_gc();
|
||||
// Add available regions to the stack and draining tasks to the task queue.
|
||||
static void prepare_region_draining_tasks(uint parallel_gc_threads);
|
||||
|
||||
|
||||
@ -771,7 +771,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi
|
||||
// this will require extensive changes to the loop optimization in order to
|
||||
// prevent a degradation of the optimization.
|
||||
// See comment in memnode.hpp, around line 227 in class LoadPNode.
|
||||
Node* tlab_end = macro->make_load(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
|
||||
Node* tlab_end = macro->make_load_raw(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
|
||||
|
||||
// Load the TLAB top.
|
||||
Node* old_tlab_top = new LoadPNode(toobig_false, mem, tlab_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -309,6 +309,8 @@ protected:
|
||||
fill_with_object(start, pointer_delta(end, start), zap);
|
||||
}
|
||||
|
||||
inline static bool is_filler_object(oop obj);
|
||||
|
||||
virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
|
||||
static size_t min_dummy_object_size() {
|
||||
return oopDesc::header_size();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,7 +27,9 @@
|
||||
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
|
||||
#include "classfile/vmClasses.hpp"
|
||||
#include "gc/shared/memAllocator.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
|
||||
@ -50,4 +52,9 @@ inline void CollectedHeap::add_vmthread_cpu_time(jlong time) {
|
||||
_vmthread_cpu_time += time;
|
||||
}
|
||||
|
||||
inline bool CollectedHeap::is_filler_object(oop obj) {
|
||||
Klass* k = obj->klass_without_asserts();
|
||||
return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -641,6 +641,10 @@ public:
|
||||
return (raw_value() & PartialArrayTag) != 0;
|
||||
}
|
||||
|
||||
bool is_null() const {
|
||||
return _p == nullptr;
|
||||
}
|
||||
|
||||
oop* to_oop_ptr() const {
|
||||
return static_cast<oop*>(decode(OopTag));
|
||||
}
|
||||
|
||||
@ -96,8 +96,22 @@ void WorkerThreads::initialize_workers() {
}
}

bool WorkerThreads::allow_inject_creation_failure() const {
if (!is_init_completed()) {
// Never allow creation failures during VM init
return false;
}

if (_created_workers == 0) {
// Never allow creation failures of the first worker, it will cause the VM to exit
return false;
}

return true;
}

WorkerThread* WorkerThreads::create_worker(uint name_suffix) {
if (is_init_completed() && InjectGCWorkerCreationFailure) {
if (InjectGCWorkerCreationFailure && allow_inject_creation_failure()) {
return nullptr;
}


@ -104,6 +104,7 @@ public:
WorkerThreads(const char* name, uint max_workers);

void initialize_workers();
bool allow_inject_creation_failure() const;

uint max_workers() const { return _max_workers; }
uint created_workers() const { return _created_workers; }
@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
|
||||
|
||||
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
|
||||
|
||||
void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
|
||||
|
||||
// The logic for cset selection in adaptive is as follows:
|
||||
@ -124,6 +124,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
|
||||
cur_garbage = new_garbage;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ShenandoahAdaptiveHeuristics::record_cycle_start() {
|
||||
|
||||
@ -108,9 +108,9 @@ public:
|
||||
|
||||
virtual ~ShenandoahAdaptiveHeuristics();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
virtual void record_cycle_start() override;
|
||||
virtual void record_success_concurrent() override;
|
||||
|
||||
@ -39,15 +39,16 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
|
||||
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
|
||||
}
|
||||
|
||||
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
ShenandoahHeapRegion* r = data[idx].get_region();
|
||||
if (r->garbage() > 0) {
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool ShenandoahAggressiveHeuristics::should_start_gc() {
|
||||
|
||||
@ -35,9 +35,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
|
||||
public:
|
||||
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
|
||||
@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
|
||||
return ShenandoahHeuristics::should_start_gc();
|
||||
}
|
||||
|
||||
void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// Do not select too large CSet that would overflow the available free space
|
||||
size_t max_cset = actual_free * 3 / 4;
|
||||
|
||||
@ -97,4 +97,5 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -37,9 +37,9 @@ public:
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free);
|
||||
|
||||
virtual const char* name() { return "Compact"; }
|
||||
virtual bool is_diagnostic() { return false; }
|
||||
|
||||
@ -37,7 +37,7 @@ ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen
|
||||
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
|
||||
}
|
||||
|
||||
void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
assert(collection_set->is_empty(), "Must be empty");
|
||||
|
||||
auto heap = ShenandoahGenerationalHeap::heap();
|
||||
@ -168,16 +168,12 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
|
||||
byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
|
||||
|
||||
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
|
||||
|
||||
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
|
||||
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
|
||||
// Only young collections need to prime the collection set.
|
||||
if (_generation->is_young()) {
|
||||
heap->old_generation()->heuristics()->prime_collection_set(collection_set);
|
||||
}
|
||||
|
||||
size_t add_regions_to_old = 0;
|
||||
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
|
||||
// Call the subclasses to add young-gen regions into the collection set.
|
||||
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
}
|
||||
|
||||
if (collection_set->has_old_regions()) {
|
||||
@ -194,6 +190,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
|
||||
regular_regions_promoted_free,
|
||||
immediate_regions,
|
||||
immediate_garbage);
|
||||
return add_regions_to_old;
|
||||
}
|
||||
|
||||
|
||||
@ -210,13 +207,6 @@ size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_s
|
||||
assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age");
|
||||
// Entire region will be promoted, This region does not impact young-gen or old-gen evacuation reserve.
|
||||
// This region has been pre-selected and its impact on promotion reserve is already accounted for.
|
||||
|
||||
// r->used() is r->garbage() + r->get_live_data_bytes()
|
||||
// Since all live data in this region is being evacuated from young-gen, it is as if this memory
|
||||
// is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to
|
||||
// reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
|
||||
// within young-gen memory.
|
||||
|
||||
cur_young_garbage += r->garbage();
|
||||
cset->add_region(r);
|
||||
}
|
||||
|
||||
@ -44,7 +44,7 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
|
||||
public:
|
||||
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
|
||||
|
||||
void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
|
||||
size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
|
||||
protected:
|
||||
ShenandoahGeneration* _generation;
|
||||
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
*/
|
||||
|
||||
#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp"
|
||||
#include "gc/shenandoah/shenandoahAsserts.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
|
||||
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
|
||||
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
|
||||
@ -35,13 +36,14 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// Better select garbage-first regions
|
||||
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
|
||||
|
||||
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -49,94 +51,212 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
|
||||
const ShenandoahHeuristics::RegionData* data,
|
||||
size_t size, size_t actual_free,
|
||||
size_t cur_young_garbage) const {
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
auto heap = ShenandoahGenerationalHeap::heap();
|
||||
auto free_set = heap->free_set();
|
||||
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t capacity = heap->soft_max_capacity();
|
||||
|
||||
size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
|
||||
size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
|
||||
|
||||
size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
|
||||
size_t original_young_evac_reserve = young_evac_reserve;
|
||||
size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
|
||||
size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
|
||||
size_t young_cur_cset = 0;
|
||||
size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
|
||||
size_t old_cur_cset = 0;
|
||||
size_t old_promo_reserve = heap->old_generation()->get_promoted_reserve();
|
||||
|
||||
// Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
|
||||
// collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
|
||||
// than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
|
||||
// to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
|
||||
// to the mutator as it needs to be able to consume this memory during concurrent GC.
|
||||
|
||||
size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
|
||||
size_t unaffiliated_young_regions = free_set->collector_unaffiliated_regions();
|
||||
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
|
||||
size_t unaffiliated_old_regions = free_set->old_collector_unaffiliated_regions();
|
||||
size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
|
||||
|
||||
if (unaffiliated_young_memory > max_young_cset) {
|
||||
size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
|
||||
unaffiliated_young_memory -= unaffiliated_mutator_memory;
|
||||
unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
|
||||
unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
|
||||
// Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
|
||||
// be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
|
||||
// most garbage, regardless of whether these garbage-first regions reside in young or old generation.
|
||||
// Excess reserves will be transferred back to the mutator after collection set has been chosen. At the end
|
||||
// of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
|
||||
|
||||
// Truncate reserves to only target unaffiliated memory
|
||||
size_t shared_reserve_regions = 0;
|
||||
if (young_evac_reserve > unaffiliated_young_memory) {
|
||||
shared_reserve_regions += unaffiliated_young_regions;
|
||||
} else {
|
||||
size_t delta_regions = young_evac_reserve / region_size_bytes;
|
||||
shared_reserve_regions += delta_regions;
|
||||
}
|
||||
young_evac_reserve = 0;
|
||||
size_t total_old_reserve = old_evac_reserve + old_promo_reserve;
|
||||
if (total_old_reserve > unaffiliated_old_memory) {
|
||||
// Give all the unaffiliated memory to the shared reserves. Leave the rest for promo reserve.
|
||||
shared_reserve_regions += unaffiliated_old_regions;
|
||||
old_promo_reserve = total_old_reserve - unaffiliated_old_memory;
|
||||
} else {
|
||||
size_t delta_regions = old_evac_reserve / region_size_bytes;
|
||||
shared_reserve_regions += delta_regions;
|
||||
}
|
||||
old_evac_reserve = 0;
|
||||
assert(shared_reserve_regions <=
|
||||
(heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()),
|
||||
"simple math");
|
||||
|
||||
// We'll affiliate these unaffiliated regions with either old or young, depending on need.
|
||||
max_young_cset -= unaffiliated_young_memory;
|
||||
size_t shared_reserves = shared_reserve_regions * region_size_bytes;
|
||||
size_t committed_from_shared_reserves = 0;
|
||||
|
||||
// Keep track of how many regions we plan to transfer from young to old.
|
||||
size_t regions_transferred_to_old = 0;
|
||||
size_t promo_bytes = 0;
|
||||
size_t old_evac_bytes = 0;
|
||||
size_t young_evac_bytes = 0;
|
||||
|
||||
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
|
||||
size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste
|
||||
size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste
|
||||
size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste
|
||||
|
||||
// Of the memory reclaimed by GC, some of this will need to be reserved for the next GC collection. Use the current
|
||||
// young reserve as an approximation of the future Collector reserve requirement. Try to end with at least
|
||||
// (capacity * ShenandoahMinFreeThreshold) / 100 bytes available to the mutator.
|
||||
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve;
|
||||
size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
|
||||
|
||||
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
|
||||
"%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
|
||||
byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
|
||||
byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
|
||||
byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
|
||||
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
|
||||
size_t aged_regions_promoted = 0;
|
||||
size_t young_regions_evacuated = 0;
|
||||
size_t old_regions_evacuated = 0;
|
||||
|
||||
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s"
|
||||
", Actual Free: %zu%s.",
|
||||
byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves),
|
||||
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
|
||||
|
||||
size_t cur_garbage = cur_young_garbage;
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
ShenandoahHeapRegion* r = data[idx].get_region();
|
||||
assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC");
|
||||
bool add_region = false;
|
||||
if (r->is_old() || heap->is_tenurable(r)) {
|
||||
size_t new_cset = old_cur_cset + r->get_live_data_bytes();
|
||||
if ((r->garbage() > garbage_threshold)) {
|
||||
while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) {
|
||||
unaffiliated_young_regions--;
|
||||
regions_transferred_to_old++;
|
||||
max_old_cset += region_size_bytes / ShenandoahOldEvacWaste;
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t new_garbage = cur_garbage + region_garbage;
|
||||
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
|
||||
size_t live_bytes = r->get_live_data_bytes();
|
||||
if (add_regardless || (region_garbage >= garbage_threshold)) {
|
||||
if (r->is_old()) {
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste);
|
||||
size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption;
|
||||
size_t new_old_evac_reserve = old_evac_reserve;
|
||||
size_t proposed_old_region_expansion = 0;
|
||||
while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_old_evac_reserve += region_size_bytes;
|
||||
}
|
||||
}
|
||||
if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) {
|
||||
add_region = true;
|
||||
old_cur_cset = new_cset;
|
||||
}
|
||||
} else {
|
||||
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
|
||||
size_t new_cset = young_cur_cset + r->get_live_data_bytes();
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t new_garbage = cur_young_garbage + region_garbage;
|
||||
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
|
||||
|
||||
if (add_regardless || (r->garbage() > garbage_threshold)) {
|
||||
while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) {
|
||||
unaffiliated_young_regions--;
|
||||
max_young_cset += region_size_bytes / ShenandoahEvacWaste;
|
||||
// If this region has free memory and we choose to place it in the collection set, its free memory is no longer
|
||||
// available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve.
|
||||
size_t anticipated_loss_from_promo_reserve = r->free();
|
||||
size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve;
|
||||
size_t new_promo_reserve = old_promo_reserve;
|
||||
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_promo_reserve += region_size_bytes;
|
||||
}
|
||||
if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) {
|
||||
add_region = true;
|
||||
old_evac_reserve = new_old_evac_reserve;
|
||||
old_promo_reserve = new_promo_reserve;
|
||||
old_evac_bytes += live_bytes;
|
||||
consumed_by_old_evac = new_old_consumption;
|
||||
consumed_by_promo = new_promo_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
old_regions_evacuated++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
|
||||
}
|
||||
} else if (heap->is_tenurable(r)) {
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste);
|
||||
size_t new_promo_consumption = consumed_by_promo + anticipated_consumption;
|
||||
size_t new_promo_reserve = old_promo_reserve;
|
||||
size_t proposed_old_region_expansion = 0;
|
||||
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_promo_reserve += region_size_bytes;
|
||||
}
|
||||
if (new_promo_consumption <= new_promo_reserve) {
|
||||
add_region = true;
|
||||
old_promo_reserve = new_promo_reserve;
|
||||
promo_bytes += live_bytes;
|
||||
consumed_by_promo = new_promo_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
aged_regions_promoted++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
|
||||
}
|
||||
} else {
|
||||
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste);
|
||||
size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption;
|
||||
size_t new_young_evac_reserve = young_evac_reserve;
|
||||
size_t proposed_young_region_expansion = 0;
|
||||
while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_young_region_expansion++;
|
||||
new_young_evac_reserve += region_size_bytes;
|
||||
}
|
||||
if (new_young_evac_consumption <= new_young_evac_reserve) {
|
||||
add_region = true;
|
||||
young_evac_reserve = new_young_evac_reserve;
|
||||
young_evac_bytes += live_bytes;
|
||||
consumed_by_young_evac = new_young_evac_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
young_regions_evacuated++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes;
|
||||
}
|
||||
}
|
||||
if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
|
||||
add_region = true;
|
||||
young_cur_cset = new_cset;
|
||||
cur_young_garbage = new_garbage;
|
||||
}
|
||||
}
|
||||
if (add_region) {
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
if (regions_transferred_to_old > 0) {
|
||||
assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
|
||||
heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
|
||||
heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
|
||||
|
||||
if (committed_from_shared_reserves < shared_reserves) {
|
||||
// Give all the rest to promotion
|
||||
old_promo_reserve += (shared_reserves - committed_from_shared_reserves);
|
||||
// dead code: committed_from_shared_reserves = shared_reserves;
|
||||
}
|
||||
|
||||
// Consider the effects of round-off:
// 1. We know that the sum over each evacuation multiplied by Evacuation Waste is <= total evacuation reserve
// 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget
// by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1
// 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round down
// to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1.
// 4. This explains the adjustments to subtotals in the assert statements below.
assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated,
"budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve);
assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated,
"budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve);
assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted,
"budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve);
assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <=
heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() +
heap->old_generation()->get_promoted_reserve(), "Exceeded budget");
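The rounding argument in the comment above is easier to see with concrete numbers. The sketch below is purely illustrative; the waste factor and live sizes are made up and are not values taken from this change.

#include <cstddef>
#include <cstdio>

int main() {
  const double waste = 1.2;                            // stand-in for ShenandoahEvacWaste
  const size_t live[3] = {1000004, 1000004, 1000004};  // arbitrary per-region live bytes

  size_t per_region_sum = 0;                           // terms rounded down one at a time
  size_t total_live = 0;
  for (size_t l : live) {
    per_region_sum += (size_t)(l * waste);
    total_live += l;
  }
  size_t scaled_total = (size_t)(total_live * waste);  // rounded down once

  // Each per-region term is rounded down individually, so the sum of the rounded terms
  // can trail the rounded total by up to one unit per region; that is why the asserts
  // above add the *_regions_evacuated counts to the reserve side of the comparison.
  printf("per-region sum: %zu, scaled total: %zu, delta: %zu\n",
         per_region_sum, scaled_total, scaled_total - per_region_sum);
  return 0;
}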
if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) {
|
||||
size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve();
|
||||
size_t delta_regions = delta_bytes / region_size_bytes;
|
||||
size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions);
|
||||
log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer);
|
||||
ssize_t negated_regions = -regions_to_transfer;
|
||||
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions);
|
||||
} else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) {
|
||||
size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve;
|
||||
size_t delta_regions = delta_bytes / region_size_bytes;
|
||||
size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions);
|
||||
log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer);
|
||||
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer);
|
||||
}
|
||||
|
||||
heap->young_generation()->set_evacuation_reserve(young_evac_reserve);
|
||||
heap->old_generation()->set_evacuation_reserve(old_evac_reserve);
|
||||
heap->old_generation()->set_promoted_reserve(old_promo_reserve);
|
||||
}
|
||||
|
||||
@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
|
||||
public:
|
||||
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
|
||||
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
private:
|
||||
void choose_global_collection_set(ShenandoahCollectionSet* cset,
|
||||
|
||||
@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
|
||||
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
|
||||
}
|
||||
|
||||
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
assert(collection_set->is_empty(), "Must be empty");
|
||||
@ -153,8 +153,8 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
|
||||
if (immediate_percent <= ShenandoahImmediateThreshold) {
|
||||
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
}
|
||||
|
||||
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ShenandoahHeuristics::record_cycle_start() {
|
||||
|
||||
@ -129,6 +129,13 @@ protected:
|
||||
#endif
|
||||
}
|
||||
|
||||
inline void update_livedata(size_t live) {
|
||||
_region_union._live_data = live;
|
||||
#ifdef ASSERT
|
||||
_union_tag = is_live_data;
|
||||
#endif
|
||||
}
|
||||
|
||||
inline ShenandoahHeapRegion* get_region() const {
|
||||
assert(_union_tag != is_uninitialized, "Cannot fetch region from uninitialized RegionData");
|
||||
return _region;
|
||||
@ -176,9 +183,12 @@ protected:
|
||||
|
||||
static int compare_by_garbage(RegionData a, RegionData b);
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free) = 0;
|
||||
// This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
|
||||
// the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
|
||||
// mode, the return value is 0.
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free) = 0;
|
||||
|
||||
void adjust_penalty(intx step);
|
||||
|
||||
@ -226,7 +236,9 @@ public:
|
||||
|
||||
virtual void record_requested_gc();
|
||||
|
||||
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
// Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
|
||||
// reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
|
||||
virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
|
||||
virtual bool can_unload_classes();
|
||||
|
||||
|
||||
@ -26,9 +26,11 @@
|
||||
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
|
||||
#include "gc/shenandoah/shenandoahFreeSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
|
||||
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
|
||||
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
@ -77,18 +79,31 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
return false;
|
||||
}
|
||||
_mixed_evac_cset = collection_set;
|
||||
_included_old_regions = 0;
|
||||
_evacuated_old_bytes = 0;
|
||||
_collected_old_bytes = 0;
|
||||
|
||||
if (_old_generation->is_preparing_for_mark()) {
|
||||
// We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
|
||||
// This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
|
||||
// this in turn is most likely because there are just one or two candidate regions remaining).
|
||||
log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
|
||||
log_info(gc, ergo)("Remaining " UINT32_FORMAT
|
||||
" old regions are being coalesced and filled", unprocessed_old_collection_candidates());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Between consecutive mixed-evacuation cycles, the live data within each candidate region may change due to
|
||||
// promotions and old-gen evacuations. Re-sort the candidate regions in order to first evacuate regions that have
|
||||
// the smallest amount of live data. These are easiest to evacuate with least effort. Doing these first allows
|
||||
// us to more quickly replenish free memory with empty regions.
|
||||
for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; i++) {
|
||||
ShenandoahHeapRegion* r = _region_data[i].get_region();
|
||||
_region_data[i].update_livedata(r->get_mixed_candidate_live_data_bytes());
|
||||
}
|
||||
QuickSort::sort<RegionData>(_region_data + _next_old_collection_candidate, unprocessed_old_collection_candidates(),
|
||||
compare_by_live);
|
||||
|
||||
_first_pinned_candidate = NOT_FOUND;
|
||||
|
||||
uint included_old_regions = 0;
|
||||
@ -100,150 +115,44 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
|
||||
// of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
|
||||
// of live memory in that region and by the amount of unallocated memory in that region if the evacuation
|
||||
// budget is constrained by availability of free memory.
|
||||
const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
|
||||
const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
|
||||
size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t fragmented_available;
|
||||
size_t excess_fragmented_available;
|
||||
_old_evacuation_reserve = _old_generation->get_evacuation_reserve();
|
||||
_old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
|
||||
|
||||
if (unfragmented_available > old_evacuation_budget) {
|
||||
unfragmented_available = old_evacuation_budget;
|
||||
fragmented_available = 0;
|
||||
excess_fragmented_available = 0;
|
||||
// fragmented_available is the amount of memory within partially consumed old regions that may be required to
// hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available
// in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
// to evacuate into the existing partially consumed old regions.

// If fragmented_available is non-zero, excess_fragmented_old_budget represents the amount of fragmented memory
// that is available within old, but is not required to hold the results of old evacuation. As old-gen regions
// are added into the collection set, their free memory is subtracted from excess_fragmented_old_budget until the
// excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is
// subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
// fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions
// selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
// to unfragmented regions.
|
||||
size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
|
||||
if (unaffiliated_available > _old_evacuation_reserve) {
|
||||
_unspent_unfragmented_old_budget = _old_evacuation_budget;
|
||||
_unspent_fragmented_old_budget = 0;
|
||||
_excess_fragmented_old_budget = 0;
|
||||
} else {
|
||||
assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
|
||||
fragmented_available = _old_generation->available() - unfragmented_available;
|
||||
assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
|
||||
if (fragmented_available + unfragmented_available > old_evacuation_budget) {
|
||||
excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
|
||||
fragmented_available -= excess_fragmented_available;
|
||||
assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
|
||||
size_t affiliated_available = _old_generation->available() - unaffiliated_available;
|
||||
assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
|
||||
if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
|
||||
_excess_fragmented_old_budget = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
|
||||
affiliated_available -= _excess_fragmented_old_budget;
|
||||
}
|
||||
_unspent_fragmented_old_budget = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
|
||||
_unspent_unfragmented_old_budget = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
|
||||
}
|
||||
|
||||
size_t remaining_old_evacuation_budget = old_evacuation_budget;
|
||||
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u",
|
||||
byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
|
||||
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u",
|
||||
PROPERFMTARGS(_old_evacuation_budget),
|
||||
unprocessed_old_collection_candidates());
|
||||
|
||||
size_t lost_evacuation_capacity = 0;
|
||||
|
||||
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
|
||||
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
|
||||
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
|
||||
// evacuate region N, then there is no need to even consider evacuating region N+1.
|
||||
while (unprocessed_old_collection_candidates() > 0) {
|
||||
// Old collection candidates are sorted in order of decreasing garbage contained therein.
|
||||
ShenandoahHeapRegion* r = next_old_collection_candidate();
|
||||
if (r == nullptr) {
|
||||
break;
|
||||
}
|
||||
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
|
||||
|
||||
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
|
||||
// to decrease the capacity of the fragmented memory by the scaled loss.
|
||||
|
||||
const size_t live_data_for_evacuation = r->get_live_data_bytes();
|
||||
size_t lost_available = r->free();
|
||||
|
||||
if ((lost_available > 0) && (excess_fragmented_available > 0)) {
|
||||
if (lost_available < excess_fragmented_available) {
|
||||
excess_fragmented_available -= lost_available;
|
||||
lost_evacuation_capacity -= lost_available;
|
||||
lost_available = 0;
|
||||
} else {
|
||||
lost_available -= excess_fragmented_available;
|
||||
lost_evacuation_capacity -= excess_fragmented_available;
|
||||
excess_fragmented_available = 0;
|
||||
}
|
||||
}
|
||||
size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
|
||||
if ((lost_available > 0) && (fragmented_available > 0)) {
|
||||
if (scaled_loss + live_data_for_evacuation < fragmented_available) {
|
||||
fragmented_available -= scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
// We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
|
||||
// to decrement scaled_loss
|
||||
}
|
||||
}
|
||||
if (scaled_loss > 0) {
|
||||
// We were not able to account for the lost free memory within fragmented memory, so we need to take this
|
||||
// allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
|
||||
if (live_data_for_evacuation > unfragmented_available) {
|
||||
// There is no room to evacuate this region or any that come after it within the candidates array.
|
||||
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
|
||||
unfragmented_available, live_data_for_evacuation, r->index());
|
||||
break;
|
||||
} else {
|
||||
unfragmented_available -= live_data_for_evacuation;
|
||||
}
|
||||
} else {
|
||||
// Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
|
||||
// fragmented or unfragmented available memory. Use up the fragmented memory budget first.
|
||||
size_t evacuation_need = live_data_for_evacuation;
|
||||
|
||||
if (evacuation_need > fragmented_available) {
|
||||
evacuation_need -= fragmented_available;
|
||||
fragmented_available = 0;
|
||||
} else {
|
||||
fragmented_available -= evacuation_need;
|
||||
evacuation_need = 0;
|
||||
}
|
||||
if (evacuation_need > unfragmented_available) {
|
||||
// There is no room to evacuate this region or any that come after it within the candidates array.
|
||||
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
|
||||
unfragmented_available, live_data_for_evacuation, r->index());
|
||||
break;
|
||||
} else {
|
||||
unfragmented_available -= evacuation_need;
|
||||
// dead code: evacuation_need == 0;
|
||||
}
|
||||
}
|
||||
collection_set->add_region(r);
|
||||
included_old_regions++;
|
||||
evacuated_old_bytes += live_data_for_evacuation;
|
||||
collected_old_bytes += r->garbage();
|
||||
consume_old_collection_candidate();
|
||||
}
|
||||
|
||||
if (_first_pinned_candidate != NOT_FOUND) {
|
||||
// Need to deal with pinned regions
|
||||
slide_pinned_regions_to_front();
|
||||
}
|
||||
decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
|
||||
if (included_old_regions > 0) {
|
||||
log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
|
||||
included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
|
||||
}
|
||||
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
// We have added the last of our collection candidates to a mixed collection.
|
||||
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
|
||||
clear_triggers();
|
||||
|
||||
_old_generation->complete_mixed_evacuations();
|
||||
} else if (included_old_regions == 0) {
|
||||
// We have candidates, but none were included for evacuation - are they all pinned?
|
||||
// or did we just not have enough room for any of them in this collection set?
|
||||
// We don't want a region with a stuck pin to prevent subsequent old collections, so
|
||||
// if they are all pinned we transition to a state that will allow us to make these uncollected
|
||||
// (pinned) regions parsable.
|
||||
if (all_candidates_are_pinned()) {
|
||||
log_info(gc, ergo)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
|
||||
_old_generation->abandon_mixed_evacuations();
|
||||
} else {
|
||||
log_info(gc, ergo)("No regions selected for mixed collection. "
|
||||
"Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
|
||||
", Lost capacity: " PROPERFMT
|
||||
", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
|
||||
PROPERFMTARGS(old_evacuation_reserve),
|
||||
PROPERFMTARGS(remaining_old_evacuation_budget),
|
||||
PROPERFMTARGS(lost_evacuation_capacity),
|
||||
_next_old_collection_candidate, _last_old_collection_candidate);
|
||||
}
|
||||
}
|
||||
|
||||
return (included_old_regions > 0);
|
||||
return add_old_regions_to_cset();
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
|
||||
@ -317,6 +226,187 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
|
||||
_next_old_collection_candidate = write_index + 1;
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
return false;
|
||||
}
|
||||
_first_pinned_candidate = NOT_FOUND;
|
||||
|
||||
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
|
||||
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
|
||||
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
|
||||
// evacuate region N, then there is no need to even consider evacuating region N+1.
|
||||
while (unprocessed_old_collection_candidates() > 0) {
|
||||
// Old collection candidates are sorted in order of decreasing garbage contained therein.
|
||||
ShenandoahHeapRegion* r = next_old_collection_candidate();
|
||||
if (r == nullptr) {
|
||||
break;
|
||||
}
|
||||
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
|
||||
|
||||
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
|
||||
// to decrease the capacity of the fragmented memory by the scaled loss.
|
||||
|
||||
const size_t live_data_for_evacuation = r->get_live_data_bytes();
|
||||
size_t lost_available = r->free();
|
||||
|
||||
ssize_t fragmented_delta = 0;
|
||||
ssize_t unfragmented_delta = 0;
|
||||
ssize_t excess_delta = 0;
|
||||
|
||||
// We must decrease our mixed-evacuation budgets in proportion to the lost available memory. This memory that is no
|
||||
// longer available was likely "promised" to promotions, so we must decrease our mixed evacuations now.
|
||||
// (e.g. if ShenandoahOldEvacWaste is 1.4 and we lose 14 bytes of available old memory, we must decrease the evacuation budget by 10 bytes.)
|
||||
size_t scaled_loss = (size_t) (((double) lost_available) / ShenandoahOldEvacWaste);
|
||||
if (lost_available > 0) {
|
||||
// We need to subtract lost_available from our working evacuation budgets
|
||||
if (scaled_loss < _excess_fragmented_old_budget) {
|
||||
excess_delta -= scaled_loss;
|
||||
_excess_fragmented_old_budget -= scaled_loss;
|
||||
} else {
|
||||
excess_delta -= _excess_fragmented_old_budget;
|
||||
_excess_fragmented_old_budget = 0;
|
||||
}
|
||||
|
||||
if (scaled_loss < _unspent_fragmented_old_budget) {
|
||||
_unspent_fragmented_old_budget -= scaled_loss;
|
||||
fragmented_delta = -scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
scaled_loss -= _unspent_fragmented_old_budget;
|
||||
fragmented_delta = -_unspent_fragmented_old_budget;
|
||||
_unspent_fragmented_old_budget = 0;
|
||||
}
|
||||
|
||||
if (scaled_loss < _unspent_unfragmented_old_budget) {
|
||||
_unspent_unfragmented_old_budget -= scaled_loss;
|
||||
unfragmented_delta = -scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
scaled_loss -= _unspent_unfragmented_old_budget;
|
||||
unfragmented_delta = -_unspent_unfragmented_old_budget;
|
||||
_unspent_unfragmented_old_budget = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate replica from unfragmented memory if that exists
|
||||
size_t evacuation_need = live_data_for_evacuation;
|
||||
if (evacuation_need < _unspent_unfragmented_old_budget) {
|
||||
_unspent_unfragmented_old_budget -= evacuation_need;
|
||||
} else {
|
||||
if (_unspent_unfragmented_old_budget > 0) {
|
||||
evacuation_need -= _unspent_unfragmented_old_budget;
|
||||
unfragmented_delta -= _unspent_unfragmented_old_budget;
|
||||
_unspent_unfragmented_old_budget = 0;
|
||||
}
|
||||
// Take the remaining allocation out of fragmented available
|
||||
if (_unspent_fragmented_old_budget > evacuation_need) {
|
||||
_unspent_fragmented_old_budget -= evacuation_need;
|
||||
} else {
|
||||
// We cannot add this region into the collection set. We're done. Undo the adjustments to available.
|
||||
_unspent_fragmented_old_budget -= fragmented_delta;
|
||||
_unspent_unfragmented_old_budget -= unfragmented_delta;
|
||||
_excess_fragmented_old_budget -= excess_delta;
|
||||
break;
|
||||
}
|
||||
}
|
||||
_mixed_evac_cset->add_region(r);
|
||||
_included_old_regions++;
|
||||
_evacuated_old_bytes += live_data_for_evacuation;
|
||||
_collected_old_bytes += r->garbage();
|
||||
consume_old_collection_candidate();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
|
||||
if (_first_pinned_candidate != NOT_FOUND) {
|
||||
// Need to deal with pinned regions
|
||||
slide_pinned_regions_to_front();
|
||||
}
|
||||
decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes);
|
||||
if (_included_old_regions > 0) {
|
||||
log_info(gc)("Old-gen mixed evac (%zu regions, evacuating %zu%s, reclaiming: %zu%s)",
|
||||
_included_old_regions,
|
||||
byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes),
|
||||
byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes));
|
||||
}
|
||||
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
// We have added the last of our collection candidates to a mixed collection.
|
||||
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
|
||||
clear_triggers();
|
||||
_old_generation->complete_mixed_evacuations();
|
||||
} else if (_included_old_regions == 0) {
|
||||
// We have candidates, but none were included for evacuation - are they all pinned?
|
||||
// or did we just not have enough room for any of them in this collection set?
|
||||
// We don't want a region with a stuck pin to prevent subsequent old collections, so
|
||||
// if they are all pinned we transition to a state that will allow us to make these uncollected
|
||||
// (pinned) regions parsable.
|
||||
if (all_candidates_are_pinned()) {
|
||||
log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
|
||||
_old_generation->abandon_mixed_evacuations();
|
||||
} else {
|
||||
log_info(gc)("No regions selected for mixed collection. "
|
||||
"Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
|
||||
PROPERFMTARGS(_old_evacuation_reserve),
|
||||
_next_old_collection_candidate, _last_old_collection_candidate);
|
||||
}
|
||||
}
|
||||
return (_included_old_regions > 0);
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
add_regions_to_old = 0;
|
||||
return false;
|
||||
} else {
|
||||
ShenandoahYoungGeneration* young_generation = _heap->young_generation();
|
||||
size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
|
||||
size_t max_young_cset = young_generation->get_evacuation_reserve();
|
||||
|
||||
// We have budgeted to assure the live_bytes_in_tenurable_regions() get evacuated into old generation. Young reserves
|
||||
// only for untenurable region evacuations.
|
||||
size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions();
|
||||
size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
|
||||
|
||||
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
|
||||
|
||||
assert(consumed_from_young_cset <= max_young_cset, "sanity");
|
||||
assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity");
|
||||
|
||||
size_t regions_for_old_expansion;
|
||||
if (consumed_from_young_cset < max_young_cset) {
|
||||
size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
|
||||
// We can only transfer empty regions from young to old. Furthermore, we must be careful to assure that the young
|
||||
// Collector reserve that remains after transfer is comprised entirely of empty (unaffiliated) regions.
|
||||
size_t consumed_unaffiliated_regions = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
|
||||
size_t available_unaffiliated_regions = ((young_unaffiliated_regions > consumed_unaffiliated_regions)?
|
||||
young_unaffiliated_regions - consumed_unaffiliated_regions: 0);
|
||||
regions_for_old_expansion = MIN2(available_unaffiliated_regions, excess_young_reserves / region_size_bytes);
|
||||
} else {
|
||||
regions_for_old_expansion = 0;
|
||||
}
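// Worked example (purely illustrative values, not defaults): with 4 MB regions, max_young_cset = 64 MB,
// consumed_from_young_cset = 24 MB and 20 unaffiliated young regions, excess_young_reserves is 40 MB,
// consumed_unaffiliated_regions is 6, available_unaffiliated_regions is 14, and regions_for_old_expansion
// becomes MIN2(14, 40 MB / 4 MB) = 10 regions.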
|
||||
if (regions_for_old_expansion > 0) {
|
||||
log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by %zu regions",
|
||||
regions_for_old_expansion);
|
||||
add_regions_to_old = regions_for_old_expansion;
|
||||
size_t budget_supplement = region_size_bytes * regions_for_old_expansion;
|
||||
size_t supplement_without_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste);
|
||||
_old_evacuation_budget += supplement_without_waste;
|
||||
_unspent_unfragmented_old_budget += supplement_without_waste;
|
||||
_old_generation->augment_evacuation_reserve(budget_supplement);
|
||||
young_generation->set_evacuation_reserve(max_young_cset - budget_supplement);
|
||||
|
||||
return add_old_regions_to_cset();
|
||||
} else {
|
||||
add_regions_to_old = 0;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
@ -325,7 +415,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
size_t immediate_garbage = 0;
|
||||
size_t immediate_regions = 0;
|
||||
size_t live_data = 0;
|
||||
|
||||
RegionData* candidates = _region_data;
|
||||
for (size_t i = 0; i < num_regions; i++) {
|
||||
ShenandoahHeapRegion* region = heap->get_region(i);
|
||||
@ -344,10 +433,10 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
// else, regions that were promoted in place had 0 old live data at mark start
|
||||
|
||||
if (region->is_regular() || region->is_regular_pinned()) {
|
||||
// Only place regular or pinned regions with live data into the candidate set.
|
||||
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
|
||||
// for the collection set here. That happens later during the next young GC cycle,
|
||||
// by which time, the pinned region may no longer be pinned.
|
||||
// Only place regular or pinned regions with live data into the candidate set.
|
||||
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
|
||||
// for the collection set here. That happens later during the next young GC cycle,
|
||||
// by which time, the pinned region may no longer be pinned.
|
||||
if (!region->has_live()) {
|
||||
assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
|
||||
region->make_trash_immediate();
|
||||
@ -414,6 +503,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
ShenandoahHeapRegion* r = candidates[i].get_region();
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
}
|
||||
@ -456,6 +547,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
|
||||
const size_t region_garbage = r->garbage();
|
||||
const size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
defrag_count++;
|
||||
@ -546,6 +639,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
|
||||
void ShenandoahOldHeuristics::abandon_collection_candidates() {
|
||||
_last_old_collection_candidate = 0;
|
||||
_next_old_collection_candidate = 0;
|
||||
_live_bytes_in_unprocessed_candidates = 0;
|
||||
_last_old_region = 0;
|
||||
}
|
||||
|
||||
@ -790,8 +884,9 @@ bool ShenandoahOldHeuristics::is_experimental() {
|
||||
return true;
|
||||
}
|
||||
|
||||
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
ShenandoahHeuristics::RegionData* data,
|
||||
size_t data_size, size_t free) {
|
||||
size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
ShenandoahHeuristics::RegionData* data,
|
||||
size_t data_size, size_t free) {
|
||||
ShouldNotReachHere();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -102,6 +102,30 @@ private:
|
||||
size_t _fragmentation_first_old_region;
|
||||
size_t _fragmentation_last_old_region;
|
||||
|
||||
// State variables involved in construction of a mixed-evacuation collection set. These variables are initialized
|
||||
// when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code
|
||||
// calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
|
||||
// finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
|
||||
ShenandoahCollectionSet* _mixed_evac_cset;
|
||||
size_t _evacuated_old_bytes;
|
||||
size_t _collected_old_bytes;
|
||||
size_t _included_old_regions;
|
||||
size_t _old_evacuation_reserve;
|
||||
size_t _old_evacuation_budget;
|
||||
|
||||
// This represents the amount of memory that can be evacuated from old into initially empty regions during a mixed evacuation.
|
||||
// This is the total amount of unfragmented free memory in old divided by ShenandoahOldEvacWaste.
|
||||
size_t _unspent_unfragmented_old_budget;
|
||||
|
||||
// This represents the amount of memory that can be evacuated from old into initially non-empty regions during a mixed
|
||||
// evacuation. This is the total amount of initially fragmented free memory in old divided by ShenandoahOldEvacWaste.
|
||||
size_t _unspent_fragmented_old_budget;
|
||||
|
||||
// If there is more available memory in old than is required by the intended mixed evacuation, the amount of excess
|
||||
// memory is represented by _excess_fragmented_old_budget. To convert this value into a promotion budget, multiply by
|
||||
// ShenandoahOldEvacWaste and divide by ShenandoahPromoWaste.
|
||||
size_t _excess_fragmented_old_budget;
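// Informal sketch of how these three budgets are consumed (a reading of add_old_regions_to_cset(), not a stated
// invariant): the scaled loss of free memory within a newly added candidate region is charged first against
// _excess_fragmented_old_budget, then _unspent_fragmented_old_budget, then _unspent_unfragmented_old_budget,
// while the candidate's live data is charged against the unfragmented budget first and the fragmented budget second.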
|
||||
|
||||
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
|
||||
// be present within an old-generation region before that region is considered a good candidate for inclusion in
|
||||
// the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
|
||||
@ -131,7 +155,15 @@ private:
|
||||
void set_trigger_if_old_is_overgrown();
|
||||
|
||||
protected:
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
|
||||
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
|
||||
|
||||
// This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
|
||||
// to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with
|
||||
// a conservative old evacuation budget, and the second time with a larger more aggressive old evacuation budget. Returns
|
||||
// true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize
|
||||
// mixed evacuations.)
|
||||
bool add_old_regions_to_cset();
|
||||
|
||||
public:
|
||||
explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
|
||||
@ -139,8 +171,22 @@ public:
|
||||
// Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
|
||||
void prepare_for_old_collections();
|
||||
|
||||
// Return true iff the collection set is primed with at least one old-gen region.
|
||||
bool prime_collection_set(ShenandoahCollectionSet* set);
|
||||
// Initialize instance variables to support the preparation of a mixed-evacuation collection set. Adds as many
|
||||
// old candidate regions into the collection set as can fit within the initial conservative old evacuation budget.
|
||||
// Returns true iff we need to finalize mixed evacs.
|
||||
bool prime_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
|
||||
// If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
|
||||
// evacuation candidate regions into the collection set as will fit within this excess repurposed reserve.
|
||||
// Returns true iff we need to finalize mixed evacs. Upon return, the output parameter add_regions_to_old holds the
|
||||
// number of regions to transfer from young to old.
|
||||
bool top_off_collection_set(size_t &add_regions_to_old);
|
||||
|
||||
// Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
|
||||
// of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
|
||||
// be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and
|
||||
// returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
|
||||
bool finalize_mixed_evacs();
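// Sketch of the intended call sequence, mirroring ShenandoahYoungHeuristics::choose_collection_set_from_regiondata()
// elsewhere in this change (old_heuristics below is shorthand for heap->old_generation()->heuristics(); cset is the
// collection set under construction):
//   bool need_to_finalize_mixed = old_heuristics->prime_collection_set(cset);
//   size_t add_regions_to_old = 0;
//   need_to_finalize_mixed |= old_heuristics->top_off_collection_set(add_regions_to_old);
//   if (need_to_finalize_mixed) {
//     old_heuristics->finalize_mixed_evacs();
//   }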
|
||||
|
||||
// How many old-collection candidates have not yet been processed?
|
||||
uint unprocessed_old_collection_candidates() const;
|
||||
|
||||
@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
|
||||
return ShenandoahDegeneratedGC;
|
||||
}
|
||||
|
||||
void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
|
||||
|
||||
// Do not select too large CSet that would overflow the available free space.
|
||||
@ -76,4 +76,5 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -46,9 +46,9 @@ public:
|
||||
|
||||
virtual bool should_degenerate_cycle();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free);
|
||||
|
||||
virtual const char* name() { return "Passive"; }
|
||||
virtual bool is_diagnostic() { return true; }
|
||||
|
||||
@ -59,9 +59,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
|
||||
return ShenandoahHeuristics::should_start_gc();
|
||||
}
|
||||
|
||||
void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
|
||||
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
@ -70,4 +70,5 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -40,9 +40,9 @@ public:
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
|
||||
virtual const char* name() { return "Static"; }
|
||||
virtual bool is_diagnostic() { return false; }
|
||||
|
||||
@ -33,11 +33,11 @@
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
|
||||
: ShenandoahGenerationalHeuristics(generation) {
|
||||
: ShenandoahGenerationalHeuristics(generation) {
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
|
||||
@ -48,6 +48,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
|
||||
// array before younger regions that typically contain more garbage. This is one reason why,
|
||||
// for example, we continue examining regions even after rejecting a region that has
|
||||
// more live data than we can evacuate.
|
||||
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
|
||||
bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
|
||||
|
||||
// Better select garbage-first regions
|
||||
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
|
||||
@ -55,6 +57,17 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
|
||||
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
|
||||
|
||||
choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);
|
||||
|
||||
// Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be
|
||||
// enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
|
||||
// young-gen reserve available following selection of the young-gen collection set, see if we can use
|
||||
// this memory to expand the old-gen evacuation collection set.
|
||||
size_t add_regions_to_old;
|
||||
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
|
||||
if (need_to_finalize_mixed) {
|
||||
heap->old_generation()->heuristics()->finalize_mixed_evacs();
|
||||
}
|
||||
return add_regions_to_old;
|
||||
}
|
||||
|
||||
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,
|
||||
|
||||
@ -38,9 +38,9 @@ public:
|
||||
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
|
||||
|
||||
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
bool should_start_gc() override;
|
||||
|
||||
|
||||
@ -50,6 +50,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
|
||||
_region_count(0),
|
||||
_old_garbage(0),
|
||||
_preselected_regions(nullptr),
|
||||
_young_available_bytes_collected(0),
|
||||
_old_available_bytes_collected(0),
|
||||
_current_index(0) {
|
||||
|
||||
// The collection set map is reserved to cover the entire heap *and* zero addresses.
|
||||
@ -104,6 +106,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
|
||||
}
|
||||
} else if (r->is_old()) {
|
||||
_old_bytes_to_evacuate += live;
|
||||
_old_available_bytes_collected += free;
|
||||
_old_garbage += garbage;
|
||||
}
|
||||
|
||||
@ -140,6 +143,7 @@ void ShenandoahCollectionSet::clear() {
|
||||
_old_bytes_to_evacuate = 0;
|
||||
|
||||
_young_available_bytes_collected = 0;
|
||||
_old_available_bytes_collected = 0;
|
||||
|
||||
_has_old_regions = false;
|
||||
}
|
||||
|
||||
@ -75,6 +75,10 @@ private:
|
||||
// should be subtracted from what's available.
|
||||
size_t _young_available_bytes_collected;
|
||||
|
||||
// When a region having memory available to be allocated is added to the collection set, the region's available memory
|
||||
// should be subtracted from what's available.
|
||||
size_t _old_available_bytes_collected;
|
||||
|
||||
shenandoah_padding(0);
|
||||
volatile size_t _current_index;
|
||||
shenandoah_padding(1);
|
||||
@ -121,6 +125,9 @@ public:
|
||||
// Returns the amount of free bytes in young regions in the collection set.
|
||||
size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
|
||||
|
||||
// Returns the amount of free bytes in old regions in the collection set.
|
||||
size_t get_old_available_bytes_collected() const { return _old_available_bytes_collected; }
|
||||
|
||||
// Returns the amount of garbage in old regions in the collection set.
|
||||
inline size_t get_old_garbage() const;
|
||||
|
||||
|
||||
@ -204,9 +204,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
return false;
|
||||
}
|
||||
|
||||
entry_concurrent_update_refs_prepare(heap);
|
||||
|
||||
// Perform update-refs phase.
|
||||
entry_concurrent_update_refs_prepare(heap);
|
||||
if (ShenandoahVerify) {
|
||||
vmop_entry_init_update_refs();
|
||||
}
|
||||
@ -227,6 +226,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
// Update references freed up collection set, kick the cleanup to reclaim the space.
|
||||
entry_cleanup_complete();
|
||||
} else {
|
||||
_abbreviated = true;
|
||||
if (!entry_final_roots()) {
|
||||
assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
|
||||
return false;
|
||||
@ -235,7 +235,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
if (VerifyAfterGC) {
|
||||
vmop_entry_verify_final_roots();
|
||||
}
|
||||
_abbreviated = true;
|
||||
}
|
||||
|
||||
// We defer generation resizing actions until after cset regions have been recycled. We do this even following an
|
||||
@ -282,7 +281,6 @@ bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
|
||||
ShenandoahHeap* const heap = ShenandoahHeap::heap();
|
||||
TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
|
||||
@ -536,6 +534,12 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
|
||||
// This phase does not use workers, no need for setup
|
||||
heap->try_inject_alloc_failure();
|
||||
op_cleanup_early();
|
||||
if (!heap->is_evacuation_in_progress()) {
|
||||
// This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
|
||||
// the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
|
||||
// during promote-in-place processing.
|
||||
heap->rebuild_free_set(true /*concurrent*/);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahConcurrentGC::entry_evacuate() {
|
||||
|
||||
@ -326,7 +326,7 @@ void ShenandoahRegionPartitions::initialize_old_collector() {
|
||||
}
|
||||
|
||||
void ShenandoahRegionPartitions::make_all_regions_unavailable() {
|
||||
shenandoah_assert_heaplocked();
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
|
||||
_membership[partition_id].clear_all();
|
||||
_leftmosts[partition_id] = _max;
|
||||
@ -439,6 +439,13 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh
|
||||
_available[int(which_partition)] = value - _used[int(which_partition)];
|
||||
}
|
||||
|
||||
void ShenandoahRegionPartitions::set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
|
||||
shenandoah_assert_heaplocked();
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
_used[int(which_partition)] = value;
|
||||
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
|
||||
shenandoah_assert_heaplocked();
|
||||
@ -900,7 +907,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
void ShenandoahRegionPartitions::assert_bounds() {
|
||||
|
||||
size_t capacities[UIntNumPartitions];
|
||||
size_t used[UIntNumPartitions];
|
||||
@ -936,7 +943,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
switch (partition) {
|
||||
case ShenandoahFreeSetPartitionId::NotFree:
|
||||
{
|
||||
assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty");
|
||||
assert(capacity != _region_size_bytes, "Should not be retired if empty");
|
||||
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
|
||||
if (r->is_humongous()) {
|
||||
if (r->is_old()) {
|
||||
@ -976,12 +983,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
case ShenandoahFreeSetPartitionId::Collector:
|
||||
case ShenandoahFreeSetPartitionId::OldCollector:
|
||||
{
|
||||
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
|
||||
assert(capacity > 0, "free regions must have allocation capacity");
|
||||
bool is_empty = (capacity == _region_size_bytes);
|
||||
regions[int(partition)]++;
|
||||
used[int(partition)] += _region_size_bytes - capacity;
|
||||
capacities[int(partition)] += _region_size_bytes;
|
||||
|
||||
if (i < leftmosts[int(partition)]) {
|
||||
leftmosts[int(partition)] = i;
|
||||
}
|
||||
@ -1020,20 +1027,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free regions before the leftmost: %zd, bound %zd",
|
||||
"Mutator free region before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free regions past the rightmost: %zd, bound %zd",
|
||||
"Mutator free region past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"free empty region (%zd) before the leftmost bound %zd",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"free empty region (%zd) past the rightmost bound %zd",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
|
||||
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
|
||||
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: %zd < %zd",
|
||||
@ -1053,20 +1060,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
|
||||
"Collector free regions before the leftmost: %zd, bound %zd",
|
||||
"Collector free region before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
|
||||
"Collector free regions past the rightmost: %zd, bound %zd",
|
||||
"Collector free region past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
|
||||
"Collector free empty region before the leftmost: %zd, bound %zd",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
|
||||
"Collector free empty region past the rightmost: %zd, bound %zd",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
|
||||
|
||||
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
|
||||
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd",
|
||||
@ -1083,106 +1090,109 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
|
||||
// Concurrent recycling of trash recycles a region (changing its state from is_trash to is_empty without the heap lock),
|
||||
|
||||
// If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero.
|
||||
// Likewise for empty region partitions.
|
||||
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector free regions before the leftmost: %zd, bound %zd",
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector free regions past the rightmost: %zd, bound %zd",
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
"OldCollector free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
"free empty region (%zd) before the leftmost bound %zd, region %s trash",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
((beg_off >= _max)? "out of bounds is not":
|
||||
(ShenandoahHeap::heap()->get_region(_leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
|
||||
"is": "is not")));
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
"OldCollector free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
"free empty region (%zd) past the rightmost bound %zd, region %s trash",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
((end_off < 0)? "out of bounds is not" :
|
||||
(ShenandoahHeap::heap()->get_region(_rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
|
||||
"is": "is not")));
|
||||
|
||||
if (validate_totals) {
|
||||
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
|
||||
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
|
||||
// to the Mutator partition to top it off so that it matches the running totals.
|
||||
//
|
||||
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
|
||||
// should also match running totals.
|
||||
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
|
||||
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
|
||||
// to the Mutator partition to top it off so that it matches the running totals.
|
||||
//
|
||||
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
|
||||
// should also match running totals.
|
||||
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
|
||||
assert(young_retired_capacity == young_retired_used, "sanity");
|
||||
|
||||
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
|
||||
assert(young_retired_capacity == young_retired_used, "sanity");
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match (%zu != %zu)",
|
||||
capacities[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]);
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
|
||||
"Old Collector available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Capacity total must be >= counted tally");
|
||||
size_t mutator_capacity_shortfall =
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
|
||||
young_retired_capacity -= mutator_capacity_shortfall;
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
|
||||
"Old Collector available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
|
||||
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Used total must be >= counted tally");
|
||||
size_t mutator_used_shortfall =
|
||||
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_used_shortfall <= young_retired_used, "sanity");
|
||||
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
|
||||
young_retired_used -= mutator_used_shortfall;
|
||||
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Capacity total must be >= counted tally");
|
||||
size_t mutator_capacity_shortfall =
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
|
||||
young_retired_capacity -= mutator_capacity_shortfall;
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
|
||||
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
|
||||
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
|
||||
young_retired_regions -= mutator_regions_shortfall;
|
||||
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector Capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
|
||||
"Collector Available must equal capacity minus used");
|
||||
|
||||
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Used total must be >= counted tally");
|
||||
size_t mutator_used_shortfall =
|
||||
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_used_shortfall <= young_retired_used, "sanity");
|
||||
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
|
||||
young_retired_used -= mutator_used_shortfall;
|
||||
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
|
||||
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
|
||||
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
|
||||
young_retired_regions -= mutator_regions_shortfall;
|
||||
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector Capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
|
||||
"Collector Available must equal capacity minus used");
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
|
||||
"Mutator available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
|
||||
"Mutator humongous waste must match");
|
||||
}
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
|
||||
"Mutator available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
|
||||
"Mutator humongous waste must match");
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -1206,6 +1216,36 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
|
||||
clear_internal();
|
||||
}
|
||||
|
||||
void ShenandoahFreeSet::move_unaffiliated_regions_from_collector_to_old_collector(ssize_t count) {
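// Behavior summary (usage sketch only; call sites are not shown in this hunk): a positive count moves that many
// empty regions from the Collector partition to the OldCollector partition, a negative count moves them back, and
// a zero count is a no-op. A hypothetical caller might write
//   free_set->move_unaffiliated_regions_from_collector_to_old_collector(add_regions_to_old);
// where free_set and add_regions_to_old are placeholder names.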
|
||||
shenandoah_assert_heaplocked();
|
||||
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
|
||||
size_t old_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector);
|
||||
size_t collector_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector);
|
||||
if (count > 0) {
|
||||
size_t ucount = count;
|
||||
size_t bytes_moved = ucount * region_size_bytes;
|
||||
assert(collector_capacity >= bytes_moved, "Cannot transfer");
|
||||
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector) >= ucount,
|
||||
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
|
||||
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
|
||||
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity - bytes_moved);
|
||||
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity + bytes_moved);
|
||||
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
|
||||
} else if (count < 0) {
|
||||
size_t ucount = -count;
|
||||
size_t bytes_moved = ucount * region_size_bytes;
|
||||
assert(old_capacity >= bytes_moved, "Cannot transfer");
|
||||
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector) >= ucount,
|
||||
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
|
||||
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity - bytes_moved);
|
||||
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity + bytes_moved);
|
||||
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
|
||||
}
|
||||
// else, do nothing
|
||||
}
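For readers tracking the bookkeeping, here is a minimal standalone sketch of the signed-count convention used above (illustrative only, not part of this change; PartitionModel, move_unaffiliated and all field names are hypothetical): a positive count moves empty-region capacity from the Collector side to the OldCollector side, a negative count moves it back, and zero is a no-op.

#include <cassert>
#include <cstddef>
using std::size_t;

struct PartitionModel {
  size_t collector_capacity;
  size_t old_collector_capacity;
  size_t collector_empty_regions;
  size_t old_collector_empty_regions;
};

void move_unaffiliated(PartitionModel& p, long count, size_t region_size_bytes) {
  if (count > 0) {
    size_t n = static_cast<size_t>(count);
    size_t bytes = n * region_size_bytes;
    assert(p.collector_capacity >= bytes && p.collector_empty_regions >= n);
    p.collector_empty_regions -= n;      // empty regions leave Collector...
    p.collector_capacity -= bytes;
    p.old_collector_capacity += bytes;   // ...and their capacity lands in OldCollector
    p.old_collector_empty_regions += n;
  } else if (count < 0) {
    size_t n = static_cast<size_t>(-count);
    size_t bytes = n * region_size_bytes;
    assert(p.old_collector_capacity >= bytes && p.old_collector_empty_regions >= n);
    p.old_collector_empty_regions -= n;  // reverse direction: OldCollector -> Collector
    p.old_collector_capacity -= bytes;
    p.collector_capacity += bytes;
    p.collector_empty_regions += n;
  }                                      // count == 0: nothing to do
}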
|
||||
|
||||
// was pip_pad_bytes
|
||||
void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
|
||||
shenandoah_assert_heaplocked();
|
||||
@ -1261,7 +1301,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah
|
||||
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
}
|
||||
|
||||
template<typename Iter>
|
||||
@ -1496,9 +1536,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
|
||||
return nullptr;
|
||||
}
|
||||
HeapWord* result = nullptr;
|
||||
// We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this
// moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If
// we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the
// region's affiliation with FREE after we set the region's affiliation to req.affiliation() below.
r->try_recycle_under_lock();
|
||||
in_new_region = r->is_empty();
|
||||
|
||||
if (in_new_region) {
|
||||
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
|
||||
r->index(), req.type_string(), p2i(&req));
|
||||
@ -1668,7 +1711,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
|
||||
default:
|
||||
assert(false, "won't happen");
|
||||
}
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1799,6 +1842,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
|
||||
increase_bytes_allocated(waste_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
_partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used);
|
||||
increase_bytes_allocated(total_used);
|
||||
req.set_actual_size(words_size);
|
||||
@ -1819,14 +1863,16 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
|
||||
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ false,
|
||||
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ false>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
return _heap->get_region(beg)->bottom();
|
||||
}
|
||||
|
||||
class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
|
||||
public:
|
||||
void heap_region_do(ShenandoahHeapRegion* r) {
|
||||
r->try_recycle();
|
||||
if (r->is_trash()) {
|
||||
r->try_recycle();
|
||||
}
|
||||
}
|
||||
|
||||
bool is_thread_safe() {
|
||||
@ -1861,7 +1907,7 @@ bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t
|
||||
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ false>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
@ -1914,7 +1960,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
|
||||
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ false>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
// 4. Do not adjust capacities for generations, we just swapped the regions that have already
|
||||
// been accounted for. However, we should adjust the evacuation reserves as those may have changed.
|
||||
shenandoah_assert_heaplocked();
|
||||
@ -1945,7 +1991,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
|
||||
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ false,
|
||||
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
// We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
|
||||
// to recycle trash before attempting to allocate anything in the region.
|
||||
}
|
||||
@ -2025,16 +2071,23 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
|
||||
for (size_t idx = 0; idx < num_regions; idx++) {
|
||||
ShenandoahHeapRegion* region = _heap->get_region(idx);
|
||||
if (region->is_trash()) {
|
||||
// Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
|
||||
// partition but have not yet been "cleaned up" following update refs.
|
||||
// Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage)
|
||||
// but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs.
|
||||
if (region->is_old()) {
|
||||
// We're going to place this region into the Mutator set. We increment old_trashed_regions because this count represents
|
||||
// regions that the old generation is entitled to without any transfer from young. We do not place this region into
|
||||
// the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region
|
||||
// into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the
|
||||
// OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this
|
||||
// time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions().
|
||||
old_trashed_regions++;
|
||||
} else {
|
||||
assert(region->is_young(), "Trashed region should be old or young");
|
||||
young_trashed_regions++;
|
||||
}
|
||||
} else if (region->is_old()) {
|
||||
// count both humongous and regular regions, but don't count trash (cset) regions.
|
||||
// We count humongous and regular regions as "old regions". We do not count trashed regions that are old. Those
|
||||
// are counted (above) as old_trashed_regions.
|
||||
old_region_count++;
|
||||
if (first_old_region > idx) {
|
||||
first_old_region = idx;
|
||||
@ -2048,7 +2101,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
|
||||
size_t ac = alloc_capacity(region);
|
||||
if (ac >= PLAB::min_size() * HeapWordSize) {
|
||||
if (region->is_trash() || !region->is_old()) {
|
||||
// Both young and old collected regions (trashed) are placed into the Mutator set
|
||||
// Both young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set
|
||||
_partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
|
||||
if (idx < mutator_leftmost) {
|
||||
mutator_leftmost = idx;
|
||||
@ -2111,10 +2164,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
|
||||
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
|
||||
size_t humongous_waste_bytes = 0;
|
||||
if (region->is_humongous_start()) {
|
||||
oop obj = cast_to_oop(region->bottom());
|
||||
size_t byte_size = obj->size() * HeapWordSize;
|
||||
size_t region_span = ShenandoahHeapRegion::required_regions(byte_size);
|
||||
humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size;
|
||||
// Since rebuild does not necessarily happen at a safepoint, a newly allocated humongous object may not have been
|
||||
// fully initialized. Therefore, we cannot safely consult its header.
|
||||
ShenandoahHeapRegion* last_of_humongous_continuation = region;
|
||||
size_t next_idx;
|
||||
for (next_idx = idx + 1; next_idx < num_regions; next_idx++) {
|
||||
ShenandoahHeapRegion* humongous_cont_candidate = _heap->get_region(next_idx);
|
||||
if (!humongous_cont_candidate->is_humongous_continuation()) {
|
||||
break;
|
||||
}
|
||||
last_of_humongous_continuation = humongous_cont_candidate;
|
||||
}
|
||||
// For humongous regions, used() is established while holding the global heap lock so it is reliable here
|
||||
humongous_waste_bytes = ShenandoahHeapRegion::region_size_bytes() - last_of_humongous_continuation->used();
|
||||
}
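// Illustrative (hypothetical numbers): with 2 MB regions, a 5 MB humongous object spans three regions; the last
// continuation region holds only 1 MB, so humongous_waste_bytes = 2 MB - 1 MB = 1 MB of unusable tail space.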
|
||||
if (region->is_old()) {
|
||||
old_collector_used += region_size_bytes;
|
||||
@ -2183,7 +2245,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
|
||||
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ false>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
#ifdef ASSERT
|
||||
if (_heap->mode()->is_generational()) {
|
||||
assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity");
|
||||
@ -2221,7 +2283,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector
|
||||
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
// global_used is unaffected by this transfer
|
||||
|
||||
// No need to adjust ranges because humongous regions are not allocatable
|
||||
@ -2303,7 +2365,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
}
|
||||
}
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
}
|
||||
|
||||
// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
|
||||
@ -2370,7 +2432,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s
|
||||
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
}
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
return transferred_regions;
|
||||
}
|
||||
|
||||
@ -2445,7 +2507,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa
|
||||
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ true>();
|
||||
}
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
return transferred_regions;
|
||||
}
|
||||
|
||||
@ -2507,14 +2569,13 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t
|
||||
first_old_region, last_old_region, old_region_count);
|
||||
}
|
||||
|
||||
void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count,
|
||||
bool have_evacuation_reserves) {
|
||||
|
||||
void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) {
|
||||
shenandoah_assert_heaplocked();
|
||||
size_t young_reserve(0), old_reserve(0);
|
||||
|
||||
if (_heap->mode()->is_generational()) {
|
||||
compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves,
|
||||
young_reserve, old_reserve);
|
||||
compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve);
|
||||
} else {
|
||||
young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
|
||||
old_reserve = 0;
|
||||
@ -2531,8 +2592,41 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
|
||||
|
||||
// Release the rebuild lock now. What remains in this function is read-only
|
||||
rebuild_lock()->unlock();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
log_status();
|
||||
if (_heap->mode()->is_generational()) {
|
||||
// Clear the region balance until it is adjusted in preparation for a subsequent GC cycle.
|
||||
_heap->old_generation()->set_region_balance(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Reduce old reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) {
ShenandoahOldGeneration* const old_generation = _heap->old_generation();
size_t requested_promoted_reserve = old_generation->get_promoted_reserve();
size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve();
assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction");
assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity");
size_t delta = requested_old_reserve - adjusted_old_reserve;

if (requested_promoted_reserve >= delta) {
requested_promoted_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
} else {
delta -= requested_promoted_reserve;
requested_promoted_reserve = 0;
requested_old_evac_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
old_generation->set_evacuation_reserve(requested_old_evac_reserve);
}
}
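// Illustrative (hypothetical numbers): if the requested old reserve was 96 MB (64 MB promoted + 32 MB old evac)
// and only 40 MB can be reserved, delta = 56 MB is taken entirely from the promoted reserve (64 MB -> 8 MB) and
// the evacuation reserve keeps its 32 MB. If only 16 MB can be reserved, delta = 80 MB first zeroes the promoted
// reserve and the remaining 16 MB comes out of the evacuation reserve (32 MB -> 16 MB).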

// Reduce young reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) {
ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction");
young_generation->set_evacuation_reserve(adjusted_young_reserve);
}
|
||||
|
||||
/**
|
||||
@ -2549,7 +2643,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
|
||||
* this value should be computed by ShenandoahGenerationalHeap::compute_old_generation_balance().
|
||||
*/
|
||||
void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions,
|
||||
bool have_evacuation_reserves,
|
||||
size_t& young_reserve_result, size_t& old_reserve_result) const {
|
||||
shenandoah_assert_generational();
|
||||
shenandoah_assert_heaplocked();
|
||||
@ -2566,6 +2659,15 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
|
||||
old_available += old_trashed_regions * region_size_bytes;
|
||||
young_unaffiliated_regions += young_trashed_regions;
|
||||
|
||||
assert(young_capacity >= young_generation->used(),
|
||||
"Young capacity (%zu) must exceed used (%zu)", young_capacity, young_generation->used());
|
||||
|
||||
size_t young_available = young_capacity - young_generation->used();
|
||||
young_available += young_trashed_regions * region_size_bytes;
|
||||
|
||||
assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity");
|
||||
assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity");
|
||||
|
||||
// Consult old-region balance to make adjustments to current generation capacities and availability.
|
||||
// The generation region transfers take place after we rebuild. old_region_balance represents number of regions
|
||||
// to transfer from old to young.
|
||||
@ -2585,6 +2687,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
|
||||
ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
|
||||
old_available -= xfer_bytes;
|
||||
old_unaffiliated_regions -= old_region_balance;
|
||||
young_available += xfer_bytes;
|
||||
young_capacity += xfer_bytes;
|
||||
young_unaffiliated_regions += old_region_balance;
|
||||
}
|
||||
@ -2593,41 +2696,22 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
|
||||
// promotions and evacuations. The partition between which old memory is reserved for evacuation and
|
||||
// which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
|
||||
// each PLAB's available memory.
|
||||
if (have_evacuation_reserves) {
|
||||
// We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
|
||||
const size_t promoted_reserve = old_generation->get_promoted_reserve();
|
||||
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
|
||||
young_reserve_result = young_generation->get_evacuation_reserve();
|
||||
old_reserve_result = promoted_reserve + old_evac_reserve;
|
||||
if (old_reserve_result > old_available) {
|
||||
// Try to transfer memory from young to old.
|
||||
size_t old_deficit = old_reserve_result - old_available;
|
||||
size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
|
||||
if (young_unaffiliated_regions < old_region_deficit) {
|
||||
old_region_deficit = young_unaffiliated_regions;
|
||||
}
|
||||
young_unaffiliated_regions -= old_region_deficit;
|
||||
old_unaffiliated_regions += old_region_deficit;
|
||||
old_region_balance -= old_region_deficit;
|
||||
old_generation->set_region_balance(old_region_balance);
|
||||
}
|
||||
} else {
|
||||
// We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
|
||||
young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
|
||||
// The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
|
||||
// Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
|
||||
// unaffiliated regions.
|
||||
old_reserve_result = old_available;
|
||||
}
|
||||
const size_t promoted_reserve = old_generation->get_promoted_reserve();
|
||||
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
|
||||
young_reserve_result = young_generation->get_evacuation_reserve();
|
||||
old_reserve_result = promoted_reserve + old_evac_reserve;
|
||||
assert(old_reserve_result + young_reserve_result <= old_available + young_available,
|
||||
"Cannot reserve (%zu + %zu + %zu) more than is available: %zu + %zu",
|
||||
promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available);
|
||||
|
||||
// Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
|
||||
// free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
|
||||
// the reserve downward to account for this possibility. This loss is part of the reason why the original budget
|
||||
// was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
|
||||
if (old_reserve_result >
|
||||
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
|
||||
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
|
||||
old_reserve_result =
|
||||
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
|
||||
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
|
||||
}
|
||||
|
||||
if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
|
||||
@ -2791,19 +2875,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
|
||||
ShenandoahFreeSetPartitionId p = _partitions.membership(idx);
|
||||
size_t ac = alloc_capacity(r);
|
||||
assert(ac != region_size_bytes, "Empty regions should be in Mutator partion at entry to reserve_regions");
|
||||
if (p == ShenandoahFreeSetPartitionId::Collector) {
|
||||
if (ac != region_size_bytes) {
|
||||
young_used_regions++;
|
||||
young_used_bytes = region_size_bytes - ac;
|
||||
}
|
||||
// else, unaffiliated region has no used
|
||||
} else if (p == ShenandoahFreeSetPartitionId::OldCollector) {
|
||||
if (ac != region_size_bytes) {
|
||||
old_used_regions++;
|
||||
old_used_bytes = region_size_bytes - ac;
|
||||
}
|
||||
// else, unaffiliated region has no used
|
||||
} else if (p == ShenandoahFreeSetPartitionId::NotFree) {
|
||||
assert(p != ShenandoahFreeSetPartitionId::Collector, "Collector regions must be converted from Mutator regions");
|
||||
if (p == ShenandoahFreeSetPartitionId::OldCollector) {
|
||||
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
|
||||
old_used_regions++;
|
||||
old_used_bytes = region_size_bytes - ac;
|
||||
// This region is within the range for OldCollector partition, as established by find_regions_with_alloc_capacity()
|
||||
assert((_partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= idx) &&
|
||||
(_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector) >= idx),
|
||||
"find_regions_with_alloc_capacity() should have established this is in range");
|
||||
} else {
|
||||
assert(p == ShenandoahFreeSetPartitionId::NotFree, "sanity");
|
||||
// This region has been retired
|
||||
if (r->is_old()) {
|
||||
old_used_regions++;
|
||||
@ -2813,21 +2895,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
|
||||
young_used_regions++;
|
||||
young_used_bytes += region_size_bytes - ac;
|
||||
}
|
||||
} else {
|
||||
assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector");
|
||||
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
|
||||
if (idx < old_collector_low_idx) {
|
||||
old_collector_low_idx = idx;
|
||||
}
|
||||
if (idx > old_collector_high_idx) {
|
||||
old_collector_high_idx = idx;
|
||||
}
|
||||
if (idx < old_collector_empty_low_idx) {
|
||||
old_collector_empty_low_idx = idx;
|
||||
}
|
||||
if (idx > old_collector_empty_high_idx) {
|
||||
old_collector_empty_high_idx = idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2856,14 +2923,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
|
||||
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector);
|
||||
}
|
||||
|
||||
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
|
||||
collector_low_idx, collector_high_idx,
|
||||
collector_empty_low_idx, collector_empty_high_idx);
|
||||
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
|
||||
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
|
||||
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Collector,
|
||||
collector_low_idx, collector_high_idx, collector_empty_low_idx, collector_empty_high_idx);
|
||||
|
||||
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector,
|
||||
old_collector_low_idx, old_collector_high_idx,
|
||||
old_collector_empty_low_idx, old_collector_empty_high_idx);
|
||||
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
|
||||
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
|
||||
|
||||
recompute_total_used</* UsedByMutatorChanged */ true,
|
||||
/* UsedByCollectorChanged */ true, /* UsedByOldCollectorChanged */ true>();
|
||||
@ -2872,17 +2939,22 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
|
||||
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
|
||||
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
|
||||
/* UnaffiliatedChangesAreYoungNeutral */ false>();
|
||||
_partitions.assert_bounds(true);
|
||||
_partitions.assert_bounds();
|
||||
if (LogTarget(Info, gc, free)::is_enabled()) {
|
||||
size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
|
||||
if (old_reserve < to_reserve_old) {
|
||||
log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
|
||||
PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
|
||||
assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode");
|
||||
reduce_old_reserve(old_reserve, to_reserve_old);
|
||||
}
|
||||
size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
|
||||
if (reserve < to_reserve) {
|
||||
if (_heap->mode()->is_generational()) {
|
||||
reduce_young_reserve(reserve, to_reserve);
|
||||
}
|
||||
log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
|
||||
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
|
||||
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -224,6 +224,10 @@ public:
|
||||
void transfer_used_capacity_from_to(ShenandoahFreeSetPartitionId from_partition, ShenandoahFreeSetPartitionId to_partition,
|
||||
size_t regions);
|
||||
|
||||
// For recycled region r in the OldCollector partition but possibly not within the interval for empty OldCollector regions,
|
||||
// expand the empty interval to include this region.
|
||||
inline void adjust_interval_for_recycled_old_region_under_lock(ShenandoahHeapRegion* r);
|
||||
|
||||
const char* partition_membership_name(idx_t idx) const;
|
||||
|
||||
// Return the index of the next available region >= start_index, or maximum_regions if not found.
|
||||
@ -373,12 +377,7 @@ public:
|
||||
|
||||
inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
|
||||
|
||||
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
|
||||
shenandoah_assert_heaplocked();
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
_used[int(which_partition)] = value;
|
||||
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
|
||||
}
|
||||
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value);
|
||||
|
||||
inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }
|
||||
|
||||
@ -402,7 +401,7 @@ public:
|
||||
// idx >= leftmost &&
|
||||
// idx <= rightmost
|
||||
// }
|
||||
void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN;
|
||||
void assert_bounds() NOT_DEBUG_RETURN;
|
||||
};
|
||||
|
||||
// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),
|
||||
@ -634,7 +633,11 @@ private:
|
||||
void establish_old_collector_alloc_bias();
|
||||
size_t get_usable_free_words(size_t free_bytes) const;
|
||||
|
||||
void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve);
|
||||
void reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve);
|
||||
|
||||
void log_freeset_stats(ShenandoahFreeSetPartitionId partition_id, LogStream& ls);
|
||||
|
||||
// log status, assuming lock has already been acquired by the caller.
|
||||
void log_status();
|
||||
|
||||
@ -685,35 +688,46 @@ public:
|
||||
return _total_global_used;
|
||||
}
|
||||
|
||||
size_t global_unaffiliated_regions() {
|
||||
// A negative argument results in moving from old_collector to collector
|
||||
void move_unaffiliated_regions_from_collector_to_old_collector(ssize_t regions);
|
||||
|
||||
inline size_t global_unaffiliated_regions() {
|
||||
return _global_unaffiliated_regions;
|
||||
}
|
||||
|
||||
size_t young_unaffiliated_regions() {
|
||||
inline size_t young_unaffiliated_regions() {
|
||||
return _young_unaffiliated_regions;
|
||||
}
|
||||
|
||||
size_t old_unaffiliated_regions() {
|
||||
inline size_t collector_unaffiliated_regions() {
|
||||
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector);
|
||||
}
|
||||
|
||||
inline size_t old_collector_unaffiliated_regions() {
|
||||
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
|
||||
}
|
||||
|
||||
size_t young_affiliated_regions() {
|
||||
inline size_t old_unaffiliated_regions() {
|
||||
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
|
||||
}
|
||||
|
||||
inline size_t young_affiliated_regions() {
|
||||
return _young_affiliated_regions;
|
||||
}
|
||||
|
||||
size_t old_affiliated_regions() {
|
||||
inline size_t old_affiliated_regions() {
|
||||
return _old_affiliated_regions;
|
||||
}
|
||||
|
||||
size_t global_affiliated_regions() {
|
||||
inline size_t global_affiliated_regions() {
|
||||
return _global_affiliated_regions;
|
||||
}
|
||||
|
||||
size_t total_young_regions() {
|
||||
inline size_t total_young_regions() {
|
||||
return _total_young_regions;
|
||||
}
|
||||
|
||||
size_t total_old_regions() {
|
||||
inline size_t total_old_regions() {
|
||||
return _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / ShenandoahHeapRegion::region_size_bytes();
|
||||
}
|
||||
|
||||
@ -725,36 +739,27 @@ public:
|
||||
|
||||
// Examine the existing free set representation, capturing the current state into var arguments:
|
||||
//
|
||||
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
|
||||
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
|
||||
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
|
||||
// old_trashed_regions is the number of trashed regions
|
||||
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
|
||||
// first_old_region is the index of the first region that is part of the OldCollector set
|
||||
// last_old_region is the index of the last region that is part of the OldCollector set
|
||||
// old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
|
||||
void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
|
||||
void prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
|
||||
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
|
||||
|
||||
// At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
|
||||
// hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
|
||||
// These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
|
||||
// the free set (ShenandoahFreeSet) in preparation for evacuation. When the free set is rebuilt, we make sure
|
||||
// to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
|
||||
// hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their
|
||||
// respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
|
||||
// evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
|
||||
// old_collector sets to hold evacuations. Likewise, at the end of update refs, we rebuild the free set in order
|
||||
// to set aside reserves to be consumed during the next GC cycle.
|
||||
//
|
||||
// We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger. In this case,
|
||||
// have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
|
||||
// next GC cycle. When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
|
||||
// and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
|
||||
// ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the
|
||||
// evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
|
||||
// found during the previous GC pass which is one less than the current tenure age.
|
||||
//
|
||||
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
|
||||
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
|
||||
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
|
||||
// old_trashed_regions is the number of trashed regions
|
||||
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
|
||||
// num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
|
||||
// have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
|
||||
// promotion reserve have been precomputed (and can be obtained by invoking
|
||||
// <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve()
|
||||
void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
|
||||
bool have_evacuation_reserves = false);
|
||||
void finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t num_old_regions);
|
||||
|
||||
// When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size()
|
||||
// into the old collector partition by invoking this method.
|
||||
@ -806,9 +811,18 @@ public:
|
||||
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
|
||||
// Use this version of available() if the heap lock is held.
|
||||
inline size_t available_locked() const {
|
||||
return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
|
||||
inline size_t total_humongous_waste() const { return _total_humongous_waste; }
|
||||
inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
|
||||
inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); }
|
||||
inline size_t humongous_waste_in_mutator() const {
|
||||
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
inline size_t humongous_waste_in_old() const {
|
||||
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector);
|
||||
}
|
||||
|
||||
void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste);
|
||||
|
||||
@ -874,7 +888,7 @@ public:
|
||||
|
||||
// Reserve space for evacuations, with regions reserved for old evacuations placed to the right
|
||||
// of regions reserved for young evacuations.
|
||||
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
|
||||
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
|
||||
size_t &young_reserve_result, size_t &old_reserve_result) const;
|
||||
};
|
||||
|
||||
|
||||
@ -522,6 +522,7 @@ public:
|
||||
void heap_region_do(ShenandoahHeapRegion* r) override {
|
||||
if (r->is_trash()) {
|
||||
r->try_recycle_under_lock();
|
||||
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
|
||||
}
|
||||
if (r->is_cset()) {
|
||||
// Leave affiliation unchanged
|
||||
@ -966,6 +967,7 @@ public:
|
||||
if (r->is_trash()) {
|
||||
live = 0;
|
||||
r->try_recycle_under_lock();
|
||||
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
|
||||
} else {
|
||||
if (r->is_old()) {
|
||||
ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
|
||||
@ -1113,16 +1115,16 @@ void ShenandoahFullGC::phase5_epilog() {
|
||||
ShenandoahPostCompactClosure post_compact;
|
||||
heap->heap_region_iterate(&post_compact);
|
||||
heap->collection_set()->clear();
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
ShenandoahFreeSet* free_set = heap->free_set();
|
||||
{
|
||||
free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
ShenandoahFreeSet* free_set = heap->free_set();
|
||||
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
|
||||
free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
|
||||
// We also do not expand old generation size following Full GC because we have scrambled age populations and
|
||||
// no longer have objects separated by age into distinct regions.
|
||||
if (heap->mode()->is_generational()) {
|
||||
ShenandoahGenerationalFullGC::compute_balances();
|
||||
}
|
||||
free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
|
||||
free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
|
||||
}
|
||||
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
|
||||
_generation->set_mark_incomplete();
|
||||
|
||||
@ -250,6 +250,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
|
||||
|
||||
ShenandoahOldGeneration* const old_generation = heap->old_generation();
|
||||
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
|
||||
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
|
||||
// During initialization and phase changes, it is more likely that fewer objects die young and old-gen
|
||||
// memory is not yet full (or is in the process of being replaced). During these times especially, it
|
||||
@ -263,15 +264,15 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
|
||||
|
||||
// First priority is to reclaim the easy garbage out of young-gen.
|
||||
|
||||
// maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
|
||||
const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
|
||||
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
|
||||
// maximum_young_evacuation_reserve is upper bound on memory to be evacuated into young Collector Reserve. This is
|
||||
// bounded at the end of previous GC cycle, based on available memory and balancing of evacuation to old and young.
|
||||
size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
|
||||
|
||||
// maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -283,12 +284,14 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// => OE = YE*SOEP/(100-SOEP)

// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t old_available = old_generation->available();
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
old_available);
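// Illustrative (hypothetical numbers): with ShenandoahOldEvacPercent = 75 and maximum_young_evacuation_reserve =
// 100 MB, the bound works out to OE = 100 * 75 / (100 - 75) = 300 MB, i.e. old evacuation may account for up to
// 75% of total evacuation (300 of 400 MB); MIN2 then clamps that bound to whatever old_available permits.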
|
||||
|
||||
// In some cases, maximum_old_reserve < old_available (when limited by ShenandoahOldEvacPercent)
|
||||
// This limit affects mixed evacuations, but does not affect promotions.
|
||||
|
||||
// Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
|
||||
// is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
|
||||
@ -305,10 +308,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
|
||||
// evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
|
||||
// Global GC will adjust generation sizes to accommodate the collection set it chooses.
|
||||
|
||||
// Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
|
||||
// have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
|
||||
// promotions in place, if relevant.
|
||||
old_promo_reserve = 0;
|
||||
// Use remnant of old_available to hold promotions.
|
||||
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
|
||||
|
||||
// Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
|
||||
// expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
|
||||
@ -319,43 +320,48 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
|
||||
// mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
|
||||
// over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
|
||||
old_evacuation_reserve = maximum_old_evacuation_reserve;
|
||||
old_promo_reserve = 0;
|
||||
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
|
||||
} else {
|
||||
// Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
|
||||
old_evacuation_reserve = 0;
|
||||
old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
|
||||
old_promo_reserve = maximum_old_evacuation_reserve;
|
||||
}
|
||||
assert(old_evacuation_reserve <= old_available, "Error");
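// Illustrative (hypothetical numbers), reading the updated branches above: with old_available = 400 MB and
// maximum_old_evacuation_reserve = 300 MB, a pending mixed evacuation yields old_evacuation_reserve = 300 MB and
// old_promo_reserve = 100 MB; with no old collection candidates the split flips to 100 MB for evacuation and
// 300 MB for promotion. In both cases the two reserves together account for all of old_available.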
|
||||
|
||||
|
||||
// We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
|
||||
// So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
|
||||
// crannies within existing partially used regions and it generally tries to do so.
|
||||
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
|
||||
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
|
||||
if (old_evacuation_reserve > old_free_unfragmented) {
|
||||
const size_t delta = old_evacuation_reserve - old_free_unfragmented;
|
||||
old_evacuation_reserve -= delta;
|
||||
// Let promo consume fragments of old-gen memory if not global
|
||||
if (!is_global()) {
|
||||
old_promo_reserve += delta;
|
||||
}
|
||||
// Let promo consume fragments of old-gen memory
|
||||
old_promo_reserve += delta;
|
||||
}
|
||||
|
||||
// Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
|
||||
// and identify regions that will promote in place. These use the tenuring threshold.
|
||||
const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
|
||||
assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
|
||||
// If is_global(), we let garbage-first heuristic determine cset membership. Otherwise, we give priority
|
||||
// to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
|
||||
// This also identifies regions that will be promoted in place. These use the tenuring threshold.
|
||||
const size_t consumed_by_advance_promotion = select_aged_regions(is_global()? 0: old_promo_reserve);
|
||||
assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
|
||||
|
||||
// The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
|
||||
// young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
|
||||
size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
|
||||
|
||||
// If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
|
||||
// and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
|
||||
// young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
|
||||
young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve());
|
||||
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
|
||||
|
||||
// Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
|
||||
// to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
|
||||
// of old evacuation failure.
|
||||
// of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
|
||||
// promotions (found during evacuation of young regions).
|
||||
young_generation->set_evacuation_reserve(young_evacuation_reserve);
|
||||
old_generation->set_evacuation_reserve(old_evacuation_reserve);
|
||||
old_generation->set_promoted_reserve(consumed_by_advance_promotion);
|
||||
old_generation->set_promoted_reserve(old_promo_reserve);
|
||||
|
||||
// There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
|
||||
// case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
|
||||
@ -363,8 +369,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
|
||||
|
||||
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
|
||||
// that young_generation->available() now knows about recently discovered immediate garbage.
|
||||
//
|
||||
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
|
||||
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
|
||||
ShenandoahCollectionSet* const collection_set, size_t add_regions_to_old) {
|
||||
shenandoah_assert_generational();
|
||||
// We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
|
||||
// be able to increase regions_available_to_loan
|
||||
@ -398,7 +404,8 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
|
||||
// Leave old_evac_reserve as previously configured
|
||||
} else if (old_evacuated_committed < old_evacuation_reserve) {
|
||||
// This happens if the old-gen collection consumes less than full budget.
|
||||
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT, PROPERFMTARGS(old_evacuated_committed));
|
||||
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT,
|
||||
PROPERFMTARGS(old_evacuated_committed));
|
||||
old_evacuation_reserve = old_evacuated_committed;
|
||||
old_generation->set_evacuation_reserve(old_evacuation_reserve);
|
||||
}
|
||||
@ -409,11 +416,17 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
|
||||
size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
|
||||
size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
|
||||
|
||||
size_t total_young_available = young_generation->available_with_reserve();
|
||||
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
|
||||
size_t total_young_available = young_generation->available_with_reserve() - add_regions_to_old * region_size_bytes;
|
||||
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
|
||||
young_evacuated_reserve_used, total_young_available);
|
||||
young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
|
||||
|
||||
size_t old_available = old_generation->available();
|
||||
// We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
|
||||
// longer be available if that memory had been free within regions that were selected for the collection set.
|
||||
// Make the necessary adjustments to old_available.
|
||||
size_t old_available =
|
||||
old_generation->available() + add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();
|
||||
|
||||
// Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
|
||||
// and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
|
||||
// evac and update phases.
|
||||
@ -422,21 +435,27 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
|
||||
if (old_available < old_consumed) {
|
||||
// This can happen due to round-off errors when adding the results of truncated integer arithmetic.
|
||||
// We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
|
||||
|
||||
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
|
||||
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
|
||||
young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
|
||||
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
|
||||
if (old_available > old_evacuated_committed) {
|
||||
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
|
||||
} else {
|
||||
young_advance_promoted_reserve_used = 0;
|
||||
old_evacuated_committed = old_available;
|
||||
}
|
||||
// TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
|
||||
old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
|
||||
}
|
||||
|
||||
assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
|
||||
old_consumed, old_available);
|
||||
size_t excess_old = old_available - old_consumed;
|
||||
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
|
||||
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + add_regions_to_old;
|
||||
size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
|
||||
assert(old_available >= unaffiliated_old,
|
||||
"Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)",
|
||||
unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available);
|
||||
assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
|
||||
old_evacuated_committed, unaffiliated_old);
|
||||
|
||||
// Make sure old_evac_committed is unaffiliated
|
||||
if (old_evacuated_committed > 0) {
|
||||
@ -454,20 +473,22 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
|
||||
}
|
||||
|
||||
// If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
|
||||
// runway during evacuation and update-refs.
|
||||
size_t regions_to_xfer = 0;
|
||||
// runway during evacuation and update-refs. We may make further adjustments to balance.
|
||||
ssize_t add_regions_to_young = 0;
|
||||
if (excess_old > unaffiliated_old) {
|
||||
// we can give back unaffiliated_old (all of unaffiliated is excess)
|
||||
if (unaffiliated_old_regions > 0) {
|
||||
regions_to_xfer = unaffiliated_old_regions;
|
||||
add_regions_to_young = unaffiliated_old_regions;
|
||||
}
|
||||
} else if (unaffiliated_old_regions > 0) {
|
||||
// excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
|
||||
size_t excess_regions = excess_old / region_size_bytes;
|
||||
regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
|
||||
add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
|
||||
}
|
||||
if (regions_to_xfer > 0) {
|
||||
excess_old -= regions_to_xfer * region_size_bytes;
|
||||
|
||||
if (add_regions_to_young > 0) {
|
||||
assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
|
||||
excess_old -= add_regions_to_young * region_size_bytes;
|
||||
log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
|
||||
"plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
|
||||
}
@ -475,6 +496,7 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
// promotions than fit in reserved memory, they will be deferred until a future GC pass.
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;

old_generation->set_promoted_reserve(total_promotion_reserve);
old_generation->reset_promoted_expended();
}
@ -782,17 +804,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());

// Find the amount that will be promoted, regions that will be promoted in
// place, and preselect older regions that will be promoted by evacuation.
// place, and preselected older regions that will be promoted by evacuation.
compute_evacuation_budgets(heap);

// Choose the collection set, including the regions preselected above for
// promotion into the old generation.
_heuristics->choose_collection_set(collection_set);
if (!collection_set->is_empty()) {
// only make use of evacuation budgets when we are evacuating
adjust_evacuation_budgets(heap, collection_set);
}

// Choose the collection set, including the regions preselected above for promotion into the old generation.
size_t add_regions_to_old = _heuristics->choose_collection_set(collection_set);
// Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator.
adjust_evacuation_budgets(heap, collection_set, add_regions_to_old);
if (is_global()) {
// We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
// the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
@ -816,17 +834,16 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahHeapLocker locker(heap->lock());

// We are preparing for evacuation. At this time, we ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

// We are preparing for evacuation.
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (heap->mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions);
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}

// Free set construction uses reserve quantities, because they are known to be valid here
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
}


@ -63,9 +63,10 @@ private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* heap);

// Adjust evacuation budgets after choosing collection set.
// Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer represents regions to be
// transferred to old based on decisions made in top_off_collection_set()
void adjust_evacuation_budgets(ShenandoahHeap* heap,
ShenandoahCollectionSet* collection_set);
ShenandoahCollectionSet* collection_set, size_t regions_to_xfer);

// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
@ -144,6 +145,22 @@ private:
virtual void prepare_gc();

// Called during final mark, chooses collection set, rebuilds free set.
// Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
// evacuation efforts that are about to begin. In particular:
//
// old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
// been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
// of the live young-gen memory within the collection set. If there is more data ready to be promoted than
// can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
// pass.
//
// old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been
// set aside to hold objects evacuated from the old-gen collection set.
//
// young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has
// been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
// equals the entire amount of live young-gen memory within the collection set, even though some of this memory
// will likely be promoted.
virtual void prepare_regions_and_collection_set(bool concurrent);
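The relationship among the three reserves described in the comment above can be pictured with a small standalone sketch (illustrative only; the struct, field names, and numbers are hypothetical and not part of the HotSpot API):

// Illustrative only: a toy picture of the reserves established by
// prepare_regions_and_collection_set(). Numbers and names are assumptions.
#include <cassert>
#include <cstddef>

struct EvacBudgets {
  size_t old_promoted_reserve;   // old-gen bytes set aside for promotions from young
  size_t old_evac_reserve;       // old-gen bytes set aside for old-gen (mixed) evacuations
  size_t young_evac_reserve;     // young-gen bytes set aside for young-gen evacuations
};

int main() {
  const size_t old_available = 96u << 20;    // assume 96 MB free in old
  const size_t young_available = 256u << 20; // assume 256 MB free in young

  EvacBudgets b{24u << 20, 32u << 20, 128u << 20};

  // The two old-gen reserves must fit within old-gen's available memory; the
  // young reserve conservatively covers all live young memory in the cset.
  assert(b.old_promoted_reserve + b.old_evac_reserve <= old_available);
  assert(b.young_evac_reserve <= young_available);
  return 0;
}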

// Cancel marking (used by Full collect and when cancelling cycle).

@ -55,9 +55,6 @@ void ShenandoahGenerationalFullGC::prepare() {
// Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
heap->set_active_generation(heap->global_generation());

// No need for old_gen->increase_used() as this was done when plabs were allocated.
heap->reset_generation_reserves();

// Full GC supersedes any marking or coalescing in old generation.
heap->old_generation()->cancel_gc();
}
@ -156,8 +153,11 @@ void ShenandoahGenerationalFullGC::compute_balances() {

// In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
heap->old_generation()->set_promotion_potential(0);
// Invoke this in case we are able to transfer memory from OLD to YOUNG.
heap->compute_old_generation_balance(0, 0);

// Invoke this in case we are able to transfer memory from OLD to YOUNG
size_t allocation_runway =
heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0L);
heap->compute_old_generation_balance(allocation_runway, 0, 0);
}

ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,

@ -299,9 +299,9 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint
alloc_from_lab = false;
}
// else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
// We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
// We choose not to promote objects smaller than size_threshold by way of shared allocations as this is too
// costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= size_threshold).
}
#ifdef ASSERT
}
@ -576,19 +576,18 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
//
// xfer_limit is the maximum we're able to transfer from young to old based on either:
// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
// 2. there is sufficient excess in the allocation runway during GC idle cycles
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

// mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is
// the maximum we're able to transfer from young to old. This is called at the end of GC, as we prepare
// for the idle span that precedes the next GC.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit,
size_t old_trashed_regions, size_t young_trashed_regions) {
shenandoah_assert_heaplocked();
// We can limit the old reserve to the size of anticipated promotions:
// max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -600,81 +599,171 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
// => OE = YE*SOEP/(100-SOEP)
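As a concrete check of the algebra above (illustrative numbers only): with YE = 300 MB and SOEP = 25, OE = 300 * 25 / 75 = 100 MB, so OE / (OE + YE) = 100 / 400 = 25%, as intended. A minimal standalone sketch, assuming nothing about the real HotSpot types:

// Standalone sketch of the OE = YE*SOEP/(100-SOEP) bound; not HotSpot code.
#include <cassert>
#include <cstddef>

// Upper bound on old evacuation given the young evacuation reserve and the
// configured percentage (SOEP). SOEP == 100 must be handled separately by the
// caller, since the formula would divide by zero.
static size_t max_old_evac(size_t young_evac_bytes, unsigned soep_percent) {
  assert(soep_percent < 100);
  return (young_evac_bytes * soep_percent) / (100 - soep_percent);
}

int main() {
  const size_t MB = 1024 * 1024;
  size_t oe = max_old_evac(300 * MB, 25);   // 100 MB
  assert(oe == 100 * MB);
  // Old evac is then 100 / (100 + 300) = 25% of total evacuation.
  return 0;
}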

// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
const size_t old_available = old_generation()->available();
// The free set will reserve this amount of memory to hold young evacuations
const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

// In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve:
MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent)
/ double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve));

assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

ShenandoahOldGeneration* old_gen = old_generation();
size_t old_capacity = old_gen->max_capacity();
size_t old_usage = old_gen->used(); // includes humongous waste
size_t old_available = ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;

ShenandoahYoungGeneration* young_gen = young_generation();
size_t young_capacity = young_gen->max_capacity();
size_t young_usage = young_gen->used(); // includes humongous waste
size_t young_available = ((young_capacity >= young_usage)? young_capacity - young_usage: 0);
size_t freeset_available = free_set()->available_locked();
if (young_available > freeset_available) {
young_available = freeset_available;
}
young_available += young_trashed_regions * region_size_bytes;

// The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve)
size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

// If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
bound_on_old_reserve:
MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
bound_on_old_reserve));
if (young_reserve > young_available) {
young_reserve = young_available;
}

// Decide how much old space we should reserve for a mixed collection
double reserve_for_mixed = 0;
if (old_generation()->has_unprocessed_collection_candidates()) {
size_t reserve_for_mixed = 0;
const size_t old_fragmented_available =
old_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;

if (old_fragmented_available > proposed_max_old) {
// After we've promoted regions in place, there may be an abundance of old-fragmented available memory,
// even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back
// to young. Instead we make the best of the situation by using this fragmented memory for both promotions
// and evacuations.
proposed_max_old = old_fragmented_available;
}
size_t reserve_for_promo = old_fragmented_available;
const size_t max_old_reserve = proposed_max_old;
const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
const bool doing_mixed = (mixed_candidate_live_memory > 0);
if (doing_mixed) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
const double max_evac_need =
(double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
const double old_fragmented_available =
double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
reserve_for_mixed = max_evac_need + old_fragmented_available;
if (reserve_for_mixed > max_old_reserve) {
reserve_for_mixed = max_old_reserve;

// We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
// we already have too much fragmented available memory in old.
reserve_for_mixed = max_evac_need;
if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
// In this case, we'll allow old-evac to target some of the fragmented old memory.
size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
if (reserve_for_promo > excess_reserves) {
reserve_for_promo -= excess_reserves;
} else {
excess_reserves -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed -= excess_reserves;
}
}
}
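The trimming rule in the block above shrinks the promotion reserve first and only then the mixed-evacuation reserve. A standalone toy sketch of that rule follows (names are hypothetical; this is not the HotSpot implementation):

// Illustrative sketch: when combined reserves exceed the cap, shrink the
// promotion reserve first, then the mixed reserve.
#include <cassert>
#include <cstddef>

static void trim_to_cap(size_t& reserve_for_mixed, size_t& reserve_for_promo, size_t max_old_reserve) {
  if (reserve_for_mixed + reserve_for_promo <= max_old_reserve) {
    return;                                  // already within budget
  }
  size_t excess = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
  if (reserve_for_promo > excess) {
    reserve_for_promo -= excess;             // promo absorbs the whole overrun
  } else {
    excess -= reserve_for_promo;             // promo goes to zero...
    reserve_for_promo = 0;
    reserve_for_mixed -= excess;             // ...and mixed absorbs the rest
  }
}

int main() {
  size_t mixed = 60, promo = 30;             // arbitrary units
  trim_to_cap(mixed, promo, 70);             // overrun of 20 comes out of promo
  assert(mixed == 60 && promo == 10);

  mixed = 60; promo = 30;
  trim_to_cap(mixed, promo, 40);             // overrun of 50: promo drops to 0, mixed to 40
  assert(mixed == 40 && promo == 0);
  return 0;
}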

// Decide how much space we should reserve for promotions from young
size_t reserve_for_promo = 0;
// Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
// over promotions.
const size_t promo_load = old_generation()->get_promotion_potential();
const bool doing_promotions = promo_load > 0;
if (doing_promotions) {
// We're promoting and have a bound on the maximum amount that can be promoted
assert(max_old_reserve >= reserve_for_mixed, "Sanity");
const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
// We've already set aside all of the fragmented available memory within old-gen to represent old objects
// to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted
// from regions that have reached tenure age. In the ideal, we will always use fragmented old-gen memory
// to hold individually promoted objects and will use unfragmented old-gen memory to represent the old-gen
// evacuation workload.

// We're promoting and have an estimate of memory to be promoted from aged regions
assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
if (promo_need > reserve_for_promo) {
reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
}
// We've already reserved all the memory required for the promo_load, and possibly more. The excess
// can be consumed by objects promoted from regions that have not yet reached tenure age.
}

// This is the total old we want to ideally reserve
const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
// This is the total old we want to reserve (initialized to the ideal reserve)
size_t old_reserve = reserve_for_mixed + reserve_for_promo;

// We now check if the old generation is running a surplus or a deficit.
const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
if (max_old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else {
// We are running a deficit which we'd like to fill from young.
// Ignore that this will directly impact young_generation()->max_capacity(),
// indirectly impacting young_reserve and old_reserve. These computations are conservative.
// Note that deficit is rounded up by one region.
const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
size_t old_region_deficit = 0;
size_t old_region_surplus = 0;

// Round down the regions we can transfer from young to old. If we're running short
// on young-gen memory, we restrict the xfer. Old-gen collection activities will be
// curtailed if the budget is restricted.
const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes;
// align the mutator_xfer_limit on region size
mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;

if (old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = old_available - old_reserve;
old_region_surplus = old_surplus / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else if (old_available + mutator_xfer_limit >= old_reserve) {
// Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
} else {
// We'll try to xfer from both mutator excess and from young collector reserve
size_t available_reserves = old_available + young_reserve + mutator_xfer_limit;
size_t old_entitlement = (available_reserves * ShenandoahOldEvacPercent) / 100;

// Round old_entitlement down to nearest multiple of regions to be transferred to old
size_t entitled_xfer = old_entitlement - old_available;
entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes);
size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
if (entitled_xfer > unaffiliated_young_memory) {
entitled_xfer = unaffiliated_young_memory;
}
old_entitlement = old_available + entitled_xfer;
if (old_entitlement < old_reserve) {
// There's not enough memory to satisfy our desire. Scale back our old-gen intentions.
size_t budget_overrun = old_reserve - old_entitlement;
if (reserve_for_promo > budget_overrun) {
reserve_for_promo -= budget_overrun;
old_reserve -= budget_overrun;
} else {
budget_overrun -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
old_reserve = reserve_for_promo + reserve_for_mixed;
}
}

// Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch
// condition above: "(old_available + mutator_xfer_limit >= old_reserve)"
// Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available

size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;

// Shrink young_reserve to account for loan to old reserve
const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
young_reserve -= reserve_xfer_regions * region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
}
}
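The surplus and deficit paths above both work at region granularity: a surplus rounds down and is clamped to the number of empty (unaffiliated) regions, while a deficit rounds up so that even a partial-region shortfall borrows a whole region. A standalone sketch of that rounding (illustrative names, not HotSpot APIs):

// Illustrative only: region-granularity rounding for surplus/deficit transfers.
#include <algorithm>
#include <cassert>
#include <cstddef>

// Surplus rounds down and can never exceed the number of empty regions available.
static size_t surplus_regions(size_t surplus_bytes, size_t unaffiliated_regions, size_t region_bytes) {
  return std::min(surplus_bytes / region_bytes, unaffiliated_regions);
}

// Deficit rounds up, so even a partial region of shortfall borrows a whole region.
static size_t deficit_regions(size_t deficit_bytes, size_t region_bytes) {
  return (deficit_bytes + region_bytes - 1) / region_bytes;
}

int main() {
  const size_t region = 2u * 1024 * 1024;                    // assume 2 MB regions
  assert(surplus_regions(5 * region + 7, 3, region) == 3);   // clamped by empty regions
  assert(deficit_regions(1, region) == 1);                   // 1 byte short still costs a region
  assert(deficit_regions(2 * region, region) == 2);
  return 0;
}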

void ShenandoahGenerationalHeap::reset_generation_reserves() {
ShenandoahHeapLocker locker(lock());
young_generation()->set_evacuation_reserve(0);
old_generation()->set_evacuation_reserve(0);
old_generation()->set_promoted_reserve(0);
assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
"Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);

// deficit/surplus adjustments to generation sizes will precede rebuild
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve(reserve_for_mixed);
old_generation()->set_promoted_reserve(reserve_for_promo);
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
@ -1015,10 +1104,6 @@ void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
shenandoah_assert_heaplocked_or_safepoint();
// In case degeneration interrupted concurrent evacuation or update references, we need to clean up
// transient state. Otherwise, these actions have no effect.
reset_generation_reserves();

if (!old_generation()->is_parsable()) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
coalesce_and_fill_old_regions(false);
@ -1036,7 +1121,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}
reset_generation_reserves();
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {

@ -136,7 +136,7 @@ public:
void reset_generation_reserves();

// Computes the optimal size for the old generation, represented as a surplus or deficit of old regions
void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions);
void compute_old_generation_balance(size_t old_xfer_limit, size_t old_trashed_regions, size_t young_trashed_regions);

// Balances generations, coalesces and fills old regions if necessary
void complete_degenerated_cycle();

@ -425,20 +425,29 @@ jint ShenandoahHeap::initialize() {

_affiliations[i] = ShenandoahAffiliation::FREE;
}

if (mode()->is_generational()) {
size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve((size_t) 0);
old_generation()->set_promoted_reserve((size_t) 0);
}

_free_set = new ShenandoahFreeSet(this, _num_regions);
post_initialize_heuristics();

// We are initializing free set. We ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
// We cannot call
// gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
// until after the heap is fully initialized. So we make up a safe value here.
size_t allocation_runway = InitialHeapSize / 2;
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}

if (AlwaysPreTouch) {
@ -2521,13 +2530,10 @@ void ShenandoahHeap::final_update_refs_update_region_states() {
parallel_heap_region_iterate(&cl);
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
void ShenandoahHeap::rebuild_free_set_within_phase() {
ShenandoahHeapLocker locker(lock());
size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
// If there are no old regions, first_old_region will be greater than last_old_region
assert((first_old_region > last_old_region) ||
((last_old_region + 1 - first_old_region >= old_region_count) &&
@ -2546,19 +2552,11 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
// available for transfer to old. Note that transfer of humongous regions does not impact available.
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);

// Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
// memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
// regions in place when many of these regular regions have an abundant amount of available memory within them.
// Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
//
// We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
// within partially consumed regions of memory.
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
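The 12.5% criterion in the removed comment above is simply one eighth of old-gen capacity. A toy check of that threshold (illustrative only; the real heuristic lives in the Shenandoah old-gen heuristics, and the names here are assumptions):

// Illustrative sketch of the "more than 12.5% fragmented" test described above.
#include <cassert>
#include <cstddef>

// True when free memory inside partially used old regions exceeds 1/8 (12.5%)
// of old-gen capacity, i.e. fragmented_free * 8 > old_capacity.
static bool old_gen_too_fragmented(size_t fragmented_free_bytes, size_t old_capacity_bytes) {
  return fragmented_free_bytes * 8 > old_capacity_bytes;
}

int main() {
  const size_t MB = 1024 * 1024;
  assert(!old_gen_too_fragmented(100 * MB, 1024 * MB));  // ~9.8%: acceptable
  assert(old_gen_too_fragmented(160 * MB, 1024 * MB));   // ~15.6%: excessive
  return 0;
}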
// Rebuild free set based on adjusted generation sizes.
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);

if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
@ -2567,6 +2565,13 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
}
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
rebuild_free_set_within_phase();
}

bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
size_t slice = r->index() / _bitmap_regions_per_slice;


@ -481,7 +481,9 @@ private:
void rendezvous_threads(const char* name);
void recycle_trash();
public:
// The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
void rebuild_free_set(bool concurrent);
void rebuild_free_set_within_phase();
void notify_gc_progress();
void notify_gc_no_progress();
size_t get_gc_no_progress_count() const;

@ -75,6 +75,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
_plab_allocs(0),
_live_data(0),
_critical_pins(0),
_mixed_candidate_garbage_words(0),
_update_watermark(start),
_age(0),
#ifdef SHENANDOAH_CENSUS_NOISE
@ -565,6 +566,7 @@ void ShenandoahHeapRegion::recycle_internal() {
assert(_recycling.is_set() && is_trash(), "Wrong state");
ShenandoahHeap* heap = ShenandoahHeap::heap();

_mixed_candidate_garbage_words = 0;
set_top(bottom());
clear_live_data();
reset_alloc_metadata();
@ -593,6 +595,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
_recycling.unset();
} else {
// Ensure recycling is unset before returning to mutator to continue memory allocation.
// Otherwise, the mutator might see the region as fully recycled and might change its affiliation only to have
// the racing GC worker thread overwrite its affiliation to FREE.
while (_recycling.is_set()) {
if (os::is_MP()) {
SpinPause();
@ -603,6 +607,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
}
}
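The comment added above describes a simple publication protocol: the caller must not treat the region as recycled until the recycling flag is cleared by the worker that claimed it. A minimal standalone sketch of that spin-wait pattern using std::atomic (illustrative only; HotSpot uses its own primitives such as SpinPause() and naked yields):

// Illustrative spin-wait sketch; not the HotSpot implementation.
#include <atomic>
#include <thread>

std::atomic<bool> recycling{false};

// Caller-side helper: wait until any in-progress recycle completes before
// using the region. Mirrors the while (_recycling.is_set()) loop above.
void wait_for_recycle_to_finish() {
  while (recycling.load(std::memory_order_acquire)) {
    std::this_thread::yield();   // stand-in for SpinPause()
  }
}

// Worker-side helper: publish all recycle work before clearing the flag.
void finish_recycle() {
  // ... reset region metadata here ...
  recycling.store(false, std::memory_order_release);
}

int main() {
  recycling.store(true);
  std::thread worker(finish_recycle);
  wait_for_recycle_to_finish();    // returns once the worker clears the flag
  worker.join();
  return 0;
}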

// Note that return from try_recycle() does not mean the region has been recycled. It only means that
// some GC worker thread has taken responsibility to recycle the region, eventually.
void ShenandoahHeapRegion::try_recycle() {
shenandoah_assert_not_heaplocked();
if (is_trash() && _recycling.try_set()) {