Mirror of https://github.com/openjdk/jdk.git (synced 2026-01-28 03:58:21 +00:00)

Compare commits: 78 commits, e217e5f3b3 ... cc227d3861
Commits (SHA1 only):

cc227d3861, 479ac8b2fd, 992a8ef46b, 40d1b642a4, 528bbe7919, 5990165d82,
4ff5f3a8c0, b1aea55205, e0445c09f7, ee2deaded8, 6fda44172e, bd92c68ef0,
5c05d6f230, cba7d88ca4, fdcc122a9d, 12570be64a, 82bd3831b0, c69275ddfe,
3220c4cb43, b42861a2aa, 67beb9cd81, bbae38e510, f4607ed0a7, 6648567574,
99b4e05d50, 61b722d59a, b59f49a1c3, fef85ff932, 512f95cf26, 319e21e9b4,
37cb22826a, 8a9127fc2d, de5c7a9e86, 0f1b96a50a, 30675faa67, 48d636872f,
42c0126fb2, 90d065e677, 0bc2dc3401, c3360ff511, a49986c62f, 4597046984,
e7cadd90b2, 2af271e5e6, 90b5469253, 38b66b1258, 932556026d, a40dbce495,
a3b1aa9f7d, 44b74e165e, e55124041e, e617ccd529, e88edd0bc6, e08fb3a914,
2c3ad0f425, 40f7a18b2d, 3fb118a29e, 6f6966b28b, fa20391e73, ca37dba4d4,
315bf07b23, 39f0e6d6f9, 7f2aa59f82, 0f087a7fef, 25d2b52ab9, d6ebcf8a4f,
f3121d1023, 96a2649e29, 5dfda66e13, 8c82b58db9, 07f6617e0b, 26aab3cccd,
025041ba04, eda15aa19c, 0d1d4d07b9, 5e0ed3f408, 66e950e9b6, 0ad81fbd16
@@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations
@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.

### SCTP Tests

The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.

For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:

```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```

For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:

```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```

### Testing Ahead-of-time Optimizations

One way to improve test coverage of ahead-of-time (AOT) optimizations in
@@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=

UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
CHECK_AVAILABLE: [
if test x$TOOLCHAIN_TYPE = xmicrosoft; then
AVAILABLE=false
fi
],
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)

if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
DEFAULT_DESC: [toolchain default],
IF_AUTO: [
RESULT=""
])

# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi

# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])

# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
@@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################

$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))

TARGETS += $(BUILD_LIBJAAS)
ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))

TARGETS += $(BUILD_LIBJAAS)
endif
################################################################################
@@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
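The two hunks above add an explicit length comparison to the generated `arrays_equals` stub. As a reference point, here is a minimal C++ sketch of the scalar logic those added instructions correspond to; the function and parameter names are illustrative, not taken from the patch:

```
// Early exit added above: if the lengths differ, the arrays cannot be equal,
// so branch straight to DONE (return false) before the element loop.
static bool arrays_equals_sketch(const unsigned char* a1, int len1,
                                 const unsigned char* a2, int len2) {
  if (len1 != len2) {              // cmp(cnt1, tmp5); br(NE, DONE);
    return false;
  }
  for (int i = 0; i < len1; i++) { // vectorized in the real stub
    if (a1[i] != a2[i]) {
      return false;
    }
  }
  return true;
}
```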
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -412,12 +412,8 @@ run_stub:
}

void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}

////////////////////////////////////////////////////////////////////////////////
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
 * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp

inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
"   dcbt   0, %0  \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}

inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
"   dcbtst  0, %0  \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}

#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
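`Prefetch::read` and `Prefetch::write` are thin wrappers around the PPC `dcbt`/`dcbtst` cache-touch instructions; the hunk above removes the XLC-builtin variant. A self-contained sketch of how such a prefetch helper is typically used, written against the GCC/Clang builtin rather than inline assembly (the loop and the prefetch distance are assumptions, not part of the change):

```
// Illustrative only: prefetch a fixed distance ahead of the element being
// processed so the cache line is likely resident when it is actually read.
static inline void prefetch_read_sketch(const void* loc, long interval) {
  __builtin_prefetch((const char*)loc + interval, /*rw=*/0);  // analogous to "dcbt 0, loc+interval"
}

static long sum_with_prefetch(const int* data, int n) {
  const long interval = 256;                 // assumed prefetch distance in bytes
  long sum = 0;
  for (int i = 0; i < n; i++) {
    prefetch_read_sketch(&data[i], interval);
    sum += data[i];
  }
  return sum;
}
```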
src/hotspot/share/cds/aotGrowableArray.cpp (new file, 34 lines)
@@ -0,0 +1,34 @@
/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/growableArray.hpp"

void AOTGrowableArrayHelper::deallocate(void* mem) {
if (!AOTMetaspace::in_aot_cache(mem)) {
GrowableArrayCHeapAllocator::deallocate(mem);
}
}
src/hotspot/share/cds/aotGrowableArray.hpp (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
#define SHARE_AOT_AOTGROWABLEARRAY_HPP

#include <memory/metaspaceClosureType.hpp>
#include <utilities/growableArray.hpp>

class AOTGrowableArrayHelper {
public:
static void deallocate(void* mem);
};

// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
// MetaspaceClosure. This type should be used for growable arrays that need to be
// stored in the AOT cache. See ModuleEntry::_reads for an example.
template <typename E>
class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
friend class VMStructs;
friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;

static E* allocate(int max, MemTag mem_tag) {
return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
}

E* allocate() {
return allocate(this->_capacity, mtClass);
}

void deallocate(E* mem) {
#if INCLUDE_CDS
AOTGrowableArrayHelper::deallocate(mem);
#else
GrowableArrayCHeapAllocator::deallocate(mem);
#endif
}

public:
AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
GrowableArrayWithAllocator<E, AOTGrowableArray>(
allocate(initial_capacity, mem_tag),
initial_capacity) {}

AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}

// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
static bool is_read_only_by_default() { return false; }
};

#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
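The header comment above points at `ModuleEntry::_reads` as the motivating use. A minimal usage sketch with an invented owner class; only `AOTGrowableArray`, `MetaspaceClosure::push` and the `metaspace_pointers_do` convention come from this change, the rest is illustrative:

```
// Illustrative only: a metadata object that owns a growable list which should
// follow it into the AOT cache (allocation of the array itself is omitted).
class ExampleEntry {
  AOTGrowableArray<Symbol*>* _names;   // backed by the C heap at dump time

public:
  void add_name(Symbol* s) { _names->append(s); }
  int  name_count() const  { return _names->length(); }

  // Called during the MetaspaceClosure-driven object graph walk at dump time.
  // Pushing the field lets ArchiveBuilder copy the array header and, via
  // AOTGrowableArray::metaspace_pointers_do / push_c_array, its backing
  // storage into the archive instead of leaving a dangling C-heap pointer.
  void metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_names); }
};
```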
src/hotspot/share/cds/aotGrowableArray.inline.hpp (new file, 37 lines)
@@ -0,0 +1,37 @@
/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP

#include "cds/aotGrowableArray.hpp"

#include "memory/metaspaceClosure.hpp"

template <typename E>
void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}

#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
info._type = ref->msotype();
info._type = ref->type();
_objs.append(info);
}

@@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->msotype();
info._type = src_info->type();
objs.append(info);
}

@@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
MetaspaceObj::Type type = info._type;
const char* type_name = MetaspaceObj::type_name(type);
MetaspaceClosureType type = info._type;
const char* type_name = MetaspaceClosure::type_name(type);

log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);

switch (type) {
case MetaspaceObj::ClassType:
case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolType:
case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstMethodType:
case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodType:
case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
case MetaspaceClosureType::ModuleEntryType:
log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::PackageEntryType:
log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::GrowableArrayType:
log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::KlassTrainingDataType:
case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodTrainingDataType:
case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::CompileTrainingDataType:
case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:

@@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}

void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
mod->name_as_C_string());
}

void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
pkg->module()->name_as_C_string(), pkg->name_as_C_string());
}

void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
arr->length(), arr->capacity());
}

void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
class GrowableArrayBase;
class KlassTrainingData;
class MethodCounters;
class MethodTrainingData;
class ModuleEntry;
class outputStream;
class PackageEntry;

// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
MetaspaceObj::Type _type;
MetaspaceClosureType _type;
};

public:
@@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);
@@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_roots(it);
}

// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();

if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::remove_unshareable_info();
}

dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());

@@ -1135,6 +1142,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
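Pieced together from the hunks above, the assumed ordering of the full-module-graph steps at static dump time is:

```
// 1. ClassLoaderDataShared::build_tables(CHECK)       -- build the archived
//    package/module arrays while Java code can still execute (TRAPS).
// 2. ClassLoaderDataShared::iterate_roots(it)         -- expose those arrays to
//    the MetaspaceClosure walk so ArchiveBuilder copies them into the buffer.
// 3. ClassLoaderDataShared::remove_unshareable_info() -- strip runtime-only
//    state from the buffered copies before the archive is written out.
```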
@@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
} else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
doit.finish();

if (CDSConfig::is_dumping_static_archive()) {
@@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}

#ifdef ASSERT
if (ref->msotype() == MetaspaceObj::MethodType) {
if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif

if (ref->msotype() == MetaspaceObj::MethodDataType) {
if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
} else if (ref->msotype() == MetaspaceObj::MethodDataType ||
ref->msotype() == MetaspaceObj::MethodCountersType ||
ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
} else if (ref->type() == MetaspaceClosureType::MethodDataType ||
ref->type() == MetaspaceClosureType::MethodCountersType ||
ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
} else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
} else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@@ -571,7 +566,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
return set_to_null;
}
if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
ResourceMark rm;
aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}
@@ -615,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = rw_region()->top();
ClassLoaderDataShared::allocate_archived_tables();
alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
}
#endif
}

void ArchiveBuilder::dump_ro_metadata() {
@@ -632,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {

start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);

#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
char* start = ro_region()->top();
ClassLoaderDataShared::init_archived_tables();
alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
}
#endif

RegeneratedClasses::record_regenerated_objects();
}

@@ -658,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer

char* oldtop = dump_region->top();
if (src_info->msotype() == MetaspaceObj::ClassType) {
if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@@ -674,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
} else if (src_info->msotype() == MetaspaceObj::SymbolType) {
} else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@@ -684,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);

// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@@ -699,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}

intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@@ -709,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);

char* newtop = dump_region->top();
_alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
_alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());

DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@@ -992,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}

if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
info.msotype() == MetaspaceObj::MethodTrainingDataType ||
info.msotype() == MetaspaceObj::CompileTrainingDataType) {
if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
info.type() == MetaspaceClosureType::MethodTrainingDataType ||
info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodDataType) {
} else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodCountersType) {
} else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}
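For orientation, a short summary of the three follow modes returned by `get_follow_mode` above (the wording is ours; the semantics are as used in the surrounding code):

```
// point_to_it  -- keep the original pointer; the target already lives in an
//                 existing archive and is not copied again.
// make_a_copy  -- copy the target into the dump buffer and relocate the
//                 pointer to the buffered copy.
// set_to_null  -- clear the pointer; the target (excluded class, unwanted
//                 training data, etc.) is not archived at all.
```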
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
MetaspaceObj::Type _msotype;
MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object insider the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* renegerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(renegerated_obj_info->_follow_mode),
_size_in_bytes(0), _msotype(renegerated_obj_info->_msotype),
_size_in_bytes(0), _type(renegerated_obj_info->_type),
_source_addr(src), _buffered_addr(renegerated_obj_info->_buffered_addr) {}

bool should_copy() const { return _follow_mode == make_a_copy; }
@@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
MetaspaceObj::Type msotype() const { return _msotype; }
MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,14 @@
 *
 */

#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.

#ifndef PRODUCT

// AOTGrowableArray has a vtable only when in non-product builds (due to
// the virtual printing functions in AnyObj).

using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;

#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
f(GrowableArray_ModuleEntry_ptr) \

#endif

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
f(CompileTrainingData)
f(CompileTrainingData) \
NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))

class CppVtableInfo {
intptr_t _vtable_size;
@@ -86,7 +102,7 @@ public:
}
};

static inline intptr_t* vtable_of(const Metadata* m) {
static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}

@@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {

template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
ResourceMark rm;
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}

intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob

assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
switch (msotype) {
case MetaspaceObj::SymbolType:
case MetaspaceObj::TypeArrayU1Type:
case MetaspaceObj::TypeArrayU2Type:
case MetaspaceObj::TypeArrayU4Type:
case MetaspaceObj::TypeArrayU8Type:
case MetaspaceObj::TypeArrayOtherType:
case MetaspaceObj::ConstMethodType:
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceObj::AnnotationsType:
case MetaspaceObj::RecordComponentType:
case MetaspaceObj::AdapterHandlerEntryType:
case MetaspaceObj::AdapterFingerPrintType:
switch (type) {
case MetaspaceClosureType::SymbolType:
case MetaspaceClosureType::TypeArrayU1Type:
case MetaspaceClosureType::TypeArrayU2Type:
case MetaspaceClosureType::TypeArrayU4Type:
case MetaspaceClosureType::TypeArrayU8Type:
case MetaspaceClosureType::TypeArrayOtherType:
case MetaspaceClosureType::CArrayType:
case MetaspaceClosureType::ConstMethodType:
case MetaspaceClosureType::ConstantPoolCacheType:
case MetaspaceClosureType::AnnotationsType:
case MetaspaceClosureType::ModuleEntryType:
case MetaspaceClosureType::PackageEntryType:
case MetaspaceClosureType::RecordComponentType:
case MetaspaceClosureType::AdapterHandlerEntryType:
case MetaspaceClosureType::AdapterFingerPrintType:
PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:
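The `vtable_of` signature change above (from `const Metadata*` to `const void*`) is what allows non-Metadata types such as `AOTGrowableArray` to be handled. A self-contained sketch of the underlying trick; standard C++ does not guarantee this layout, it relies on the Itanium-style ABI the existing code already assumes:

```
#include <cstdint>

// On the ABIs HotSpot supports, the vtable pointer is the first word of a
// polymorphic object, so it can be read and its slots copied generically.
static inline intptr_t* vtable_of_sketch(const void* obj) {
  return *(intptr_t* const*)obj;          // read the vptr stored at offset 0
}

static void clone_vtable_sketch(const void* dummy_obj, intptr_t* dest, int slots) {
  intptr_t* src = vtable_of_sketch(dummy_obj);
  for (int i = 0; i < slots; i++) {
    dest[i] = src[i];                     // copy each virtual-dispatch slot
  }
}
```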
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@

#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"

class ArchiveBuilder;
@@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,32 +27,34 @@

#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceClosureType.hpp"

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:

// Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
METASPACE_OBJ_TYPES_DO(f) \
#define DUMPED_OBJ_TYPES_DO(f) \
METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
f(ModulesNatives) \
f(CppVTables) \
f(Other)

#define DUMPED_TYPE_DECLARE(name) name ## Type,
#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;

enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};

static const char* type_name(Type type) {
switch(type) {
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }

void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
void record(MetaspaceClosureType type, int byte_size, bool read_only) {
int t = (int)type;
assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
_counts[which][type] ++;
_bytes [which][type] += byte_size;
}

void record_modules(int byte_size, bool read_only) {
int which = (read_only) ? RO : RW;
_bytes [which][ModulesNativesType] += byte_size;
_counts[which][t] ++;
_bytes [which][t] += byte_size;
}

void record_other_type(int byte_size, bool read_only) {
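The "poor man's enum inheritance" above is an X-macro pattern: `DUMPED_OBJ_TYPES_DO` replays every entry of `METASPACE_CLOSURE_TYPES_DO` and then appends the dump-only buckets. A stripped-down, self-contained illustration of the same pattern (all names invented for the example):

```
// Base list of types, shared with other components.
#define BASE_TYPES_DO(f) \
  f(Class)               \
  f(Symbol)

// "Inherited" list: everything in the base list plus dump-only buckets.
#define DUMPED_TYPES_DO(f) \
  BASE_TYPES_DO(f)         \
  f(SymbolBucket)          \
  f(Other)

#define DECLARE_TYPE(name)   name##Type,
#define TYPE_NAME_CASE(name) case name##Type: return #name;

enum ExampleType {
  DUMPED_TYPES_DO(DECLARE_TYPE)   // expands to ClassType, SymbolType, SymbolBucketType, OtherType,
  _number_of_example_types
};

static const char* example_type_name(ExampleType t) {
  switch (t) {
    DUMPED_TYPES_DO(TYPE_NAME_CASE)
    default: return "unknown";
  }
}
```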
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -948,10 +948,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}

if (CDSConfig::is_dumping_full_module_graph()) {
Modules::verify_archived_modules();
}
}

//
@@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->trust_final_fields()) {
// Explicit opt-in from system classes
return true;
}
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
return true;
}
return TrustFinalNonStaticFields;
}
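The new `trust_final_fields()` check above lets a class opt in, via the class-level `jdk.internal.vm.annotation` annotation wired up later in this change set, to having its final instance fields trusted by the compiler. A hedged sketch of the decision the JIT ultimately makes with this information (the helper below is illustrative, not an existing API):

```
// Illustrative only: folding a load of a final instance field is reasonable
// when the holder is trusted and the receiver is a compile-time constant.
static bool can_constant_fold_sketch(bool field_is_final,
                                     bool field_is_static,
                                     bool holder_is_trusted,   // e.g. trust_final_non_static_fields(holder)
                                     bool receiver_is_constant) {
  if (!field_is_final || field_is_static) {
    return false;   // only final *instance* fields are governed by this path
  }
  return holder_is_trusted && receiver_is_constant;
}
```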
@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
_trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily
@@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
bool _trust_final_fields;
bool _has_trusted_loader;

ciFlags _flags;
@@ -207,6 +208,10 @@ public:
return _is_record;
}

bool trust_final_fields() const {
return _trust_final_fields;
}

ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
@@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
_jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
if (_location != _in_class) break; // only allow for classes
if (!privileged) break; // only allow in privileged code
return _jdk_internal_vm_annotation_TrustFinalFields;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
ik->set_trust_final_fields(true);
}
}

#define MAX_ARGS_SIZE 255
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"

@@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}

void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
void allocate(ClassLoaderData* loader_data);
void init_archived_entries(ClassLoaderData* loader_data);
void iterate_roots(MetaspaceClosure* closure);
void build_tables(ClassLoaderData* loader_data, TRAPS);
void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;

void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->iterate_symbols(closure);
loader_data->modules() ->iterate_symbols(closure);
loader_data->unnamed_module()->iterate_symbols(closure);
}
it->push(&_packages);
it->push(&_modules);
it->push(&_unnamed_module);
}

void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
_packages = loader_data->packages()->allocate_archived_entries();
_modules = loader_data->modules() ->allocate_archived_entries();
_unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
_packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
_modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
_unnamed_module = loader_data->unnamed_module();
}
}

void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->init_archived_entries(_packages);
loader_data->modules() ->init_archived_entries(_modules);
_unnamed_module->init_as_archived_entry();
void ArchivedClassLoaderData::remove_unshareable_info() {
if (_packages != nullptr) {
_packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
for (int i = 0; i < _packages->length(); i++) {
_packages->at(i)->remove_unshareable_info();
}
}
if (_modules != nullptr) {
_modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
for (int i = 0; i < _modules->length(); i++) {
_modules->at(i)->remove_unshareable_info();
}
}
if (_unnamed_module != nullptr) {
_unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
_unnamed_module->remove_unshareable_info();
}
}

@@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------

void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
#endif
}

static ClassLoaderData* null_class_loader_data() {
@@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}

void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
_archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
_archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
_archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
_archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
_archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}

void ClassLoaderDataShared::allocate_archived_tables() {
void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.allocate (null_class_loader_data());
_archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
_archived_system_loader_data.allocate (java_system_loader_data_or_null());
_archived_boot_loader_data.iterate_roots(it);
_archived_platform_loader_data.iterate_roots(it);
_archived_system_loader_data.iterate_roots(it);
}

void ClassLoaderDataShared::init_archived_tables() {
void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.remove_unshareable_info();
_archived_platform_loader_data.remove_unshareable_info();
_archived_system_loader_data.remove_unshareable_info();

_archived_boot_loader_data.init_archived_entries (null_class_loader_data());
_archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
_archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());

_archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
_archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());

_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
|
||||
return archived_module;
|
||||
}
|
||||
|
||||
|
||||
void ClassLoaderDataShared::clear_archived_oops() {
|
||||
assert(!CDSConfig::is_using_full_module_graph(), "must be");
|
||||
_archived_boot_loader_data.clear_archived_oops();
|
||||
|
||||
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
static void allocate_archived_tables();
static void iterate_symbols(MetaspaceClosure* closure);
static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/

#include "cds/aotClassLocation.hpp"
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@@ -37,6 +38,7 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@@ -44,7 +46,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}

@@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
_is_patched(false)
DEBUG_ONLY(COMMA _reads_is_archived(false)) {
_is_patched(false) {

// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");

}

void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}

void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_reads);
it->push(&_version);
it->push(&_location);
}

#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const ModuleEntry*,
ModuleEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedModuleEntries;
static ArchivedModuleEntries* _archive_modules_entries = nullptr;

#ifndef PRODUCT
static int _num_archived_module_entries = 0;
static int _num_inited_module_entries = 0;
#endif

bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}

ModuleEntry* ModuleEntry::allocate_archived_entry() const {
precond(should_be_archived());
precond(CDSConfig::is_dumping_full_module_graph());
ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
void ModuleEntry::remove_unshareable_info() {
_archived_module_index = HeapShared::append_root(module_oop());

archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
if (_archive_modules_entries == nullptr) {
_archive_modules_entries = new (mtClass)ArchivedModuleEntries();
}
assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
_archive_modules_entries->put(this, archived_entry);
DEBUG_ONLY(_num_archived_module_entries++);

if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
archived_entry->_shared_pd = null_handle;
} else {
assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
if (_reads != nullptr) {
_reads->set_in_aot_cache();
}

// Clear handles and restore at run time. Handles cannot be archived.
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
_shared_pd = null_handle;
} else {
assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
}

OopHandle null_handle;
archived_entry->_module_handle = null_handle;

// For verify_archived_module_entries()
DEBUG_ONLY(_num_inited_module_entries++);

if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
archived_entry->print(&ls);
}
return archived_entry;
}

bool ModuleEntry::has_been_archived() {
assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
return _archive_modules_entries->contains(this);
}

ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
return *ptr;
}

// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
// Write it out as an Array, and convert it back to GrowableArray at runtime.
Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
Array<ModuleEntry*>* archived_array = nullptr;
int length = (array == nullptr) ? 0 : array->length();
if (length > 0) {
archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
}
}

return archived_array;
}

GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
GrowableArray<ModuleEntry*>* array = nullptr;
int length = (archived_array == nullptr) ? 0 : archived_array->length();
if (length > 0) {
array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = archived_array->at(i);
array->append(archived_entry);
}
}

return array;
}

void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
closure->push(&_version);
closure->push(&_location);
}

void ModuleEntry::init_as_archived_entry() {
set_archived_reads(write_growable_array(reads()));
_module_handle = null_handle;

_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
_name = ArchiveBuilder::get_buffered_symbol(_name);
ArchivePtrMarker::mark_pointer((address*)&_name);
Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
if (_version != nullptr) {
_version = ArchiveBuilder::get_buffered_symbol(_version);
}
if (_location != nullptr) {
_location = ArchiveBuilder::get_buffered_symbol(_location);
}
JFR_ONLY(set_trace_id(0);) // re-init at runtime

ArchivePtrMarker::mark_pointer((address*)&_reads);
ArchivePtrMarker::mark_pointer((address*)&_version);
ArchivePtrMarker::mark_pointer((address*)&_location);
}

#ifndef PRODUCT
void ModuleEntry::verify_archived_module_entries() {
assert(_num_archived_module_entries == _num_inited_module_entries,
"%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
_num_archived_module_entries, _num_inited_module_entries);
}
#endif // PRODUCT

void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}

@@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}

void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
m->iterate_symbols(closure);
};
_table.iterate_all(syms);
}

Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
Array<ModuleEntry*>* aot_table =
MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
archived_modules->at_put(n++, m);
m->pack_reads();
aot_table->at_put(n++, m);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
m->print(&ls);
}
};
_table.iterate_all(grab);

if (n > 1) {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
QuickSort::sort(aot_table->data(), n, compare_module_by_name);
}
for (int i = 0; i < n; i++) {
archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
}
return archived_modules;
}

void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
for (int i = 0; i < archived_modules->length(); i++) {
ModuleEntry* archived_entry = archived_modules->at(i);
archived_entry->init_as_archived_entry();
}
return aot_table;
}

void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP

#include "cds/aotGrowableArray.hpp"
#include "jni.h"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@@ -68,11 +70,8 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module

union {
GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
};
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@@ -81,7 +80,6 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)

JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@@ -120,22 +118,18 @@ public:

bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
GrowableArray<ModuleEntry*>* reads() const {
assert(!_reads_is_archived, "sanity");
AOTGrowableArray<ModuleEntry*>* reads() const {
return _reads;
}
void set_reads(GrowableArray<ModuleEntry*>* r) {
void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
_reads = r;
DEBUG_ONLY(_reads_is_archived = false);
}
Array<ModuleEntry*>* archived_reads() const {
assert(_reads_is_archived, "sanity");
return _archived_reads;
}
void set_archived_reads(Array<ModuleEntry*>* r) {
_archived_reads = r;
DEBUG_ONLY(_reads_is_archived = true);
void pack_reads() {
if (_reads != nullptr) {
_reads->shrink_to_fit();
}
}

void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);

@@ -189,6 +183,13 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}

// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
static bool is_read_only_by_default() { return false; }

void print(outputStream* st = tty) const;
void verify();

@@ -198,18 +199,11 @@ public:

#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void iterate_symbols(MetaspaceClosure* closure);
ModuleEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
bool has_been_archived();
static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
void remove_unshareable_info();
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};

@@ -275,9 +269,7 @@ public:
void verify();

#if INCLUDE_CDS_JAVA_HEAP
void iterate_symbols(MetaspaceClosure* closure);
Array<ModuleEntry*>* allocate_archived_entries();
void init_archived_entries(Array<ModuleEntry*>* archived_modules);
Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");

if (orig_module_ent->name() != nullptr) {
// For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
assert(orig_module_ent->has_been_archived(), "sanity");
} else {
precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
if (orig_module_ent->name() == nullptr) {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
precond(orig_module_ent->has_been_archived());

if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@@ -529,10 +526,6 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}

void Modules::verify_archived_modules() {
ModuleEntry::verify_archived_module_entries();
}

class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -22,6 +22,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "cds/aotGrowableArray.inline.hpp"
|
||||
#include "cds/aotMetaspace.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveUtils.hpp"
|
||||
#include "cds/cdsConfig.hpp"
|
||||
@ -31,13 +33,13 @@
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/array.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/hashTable.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
@ -51,7 +53,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
|
||||
_qualified_exports(nullptr),
|
||||
_defined_by_cds_in_class_path(0)
|
||||
{
|
||||
// name can't be null
|
||||
// name can't be null -- a class in the default package gets a PackageEntry of nullptr.
|
||||
_name->increment_refcount();
|
||||
|
||||
JFR_ONLY(INIT_ID(this);)
|
||||
@ -81,7 +83,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
|
||||
if (!has_qual_exports_list()) {
|
||||
// Lazily create a package's qualified exports list.
|
||||
// Initial size is small, do not anticipate export lists to be large.
|
||||
_qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
|
||||
_qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
|
||||
}
|
||||
|
||||
// Determine, based on this newly established export to module m,
|
||||
@ -183,12 +185,24 @@ void PackageEntry::purge_qualified_exports() {
|
||||
}
|
||||
|
||||
void PackageEntry::delete_qualified_exports() {
|
||||
if (_qualified_exports != nullptr) {
|
||||
if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
|
||||
delete _qualified_exports;
|
||||
}
|
||||
_qualified_exports = nullptr;
|
||||
}
|
||||
|
||||
void PackageEntry::pack_qualified_exports() {
|
||||
if (_qualified_exports != nullptr) {
|
||||
_qualified_exports->shrink_to_fit();
|
||||
}
|
||||
}
|
||||
|
||||
void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
|
||||
it->push(&_name);
|
||||
it->push(&_module);
|
||||
it->push(&_qualified_exports);
|
||||
}
|
||||
|
||||
PackageEntryTable::PackageEntryTable() { }
|
||||
|
||||
PackageEntryTable::~PackageEntryTable() {
|
||||
@ -212,66 +226,19 @@ PackageEntryTable::~PackageEntryTable() {
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
typedef HashTable<
|
||||
const PackageEntry*,
|
||||
PackageEntry*,
|
||||
557, // prime number
|
||||
AnyObj::C_HEAP> ArchivedPackageEntries;
|
||||
static ArchivedPackageEntries* _archived_packages_entries = nullptr;
|
||||
|
||||
bool PackageEntry::should_be_archived() const {
|
||||
return module()->should_be_archived();
|
||||
}
|
||||
|
||||
PackageEntry* PackageEntry::allocate_archived_entry() const {
|
||||
precond(should_be_archived());
|
||||
PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
|
||||
memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
|
||||
|
||||
if (_archived_packages_entries == nullptr) {
|
||||
_archived_packages_entries = new (mtClass)ArchivedPackageEntries();
|
||||
void PackageEntry::remove_unshareable_info() {
|
||||
if (_qualified_exports != nullptr) {
|
||||
_qualified_exports->set_in_aot_cache();
|
||||
}
|
||||
assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
|
||||
_archived_packages_entries->put(this, archived_entry);
|
||||
|
||||
return archived_entry;
|
||||
}
|
||||
|
||||
PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
|
||||
PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
|
||||
if (ptr != nullptr) {
|
||||
return *ptr;
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
|
||||
closure->push(&_name);
|
||||
}
|
||||
|
||||
void PackageEntry::init_as_archived_entry() {
|
||||
Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
|
||||
|
||||
_name = ArchiveBuilder::get_buffered_symbol(_name);
|
||||
_module = ModuleEntry::get_archived_entry(_module);
|
||||
_qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
|
||||
_defined_by_cds_in_class_path = 0;
|
||||
JFR_ONLY(set_trace_id(0);) // re-init at runtime
|
||||
|
||||
ArchivePtrMarker::mark_pointer((address*)&_name);
|
||||
ArchivePtrMarker::mark_pointer((address*)&_module);
|
||||
ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
|
||||
|
||||
LogStreamHandle(Info, aot, package) st;
|
||||
if (st.is_enabled()) {
|
||||
st.print("archived ");
|
||||
print(&st);
|
||||
}
|
||||
}
|
||||
|
||||
void PackageEntry::load_from_archive() {
|
||||
_qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
|
||||
JFR_ONLY(INIT_ID(this);)
|
||||
}
|
||||
|
||||
@ -280,14 +247,7 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
|
||||
return a->name()->fast_compare(b->name());
|
||||
}
|
||||
|
||||
void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
|
||||
auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
|
||||
p->iterate_symbols(closure);
|
||||
};
|
||||
_table.iterate_all(syms);
|
||||
}
|
||||
|
||||
Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
|
||||
Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
|
||||
// First count the packages in named modules
|
||||
int n = 0;
|
||||
auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
|
||||
@ -297,12 +257,19 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
|
||||
};
|
||||
_table.iterate_all(count);
|
||||
|
||||
Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
|
||||
Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
|
||||
// reset n
|
||||
n = 0;
|
||||
auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
|
||||
if (p->should_be_archived()) {
|
||||
p->pack_qualified_exports();
|
||||
archived_packages->at_put(n++, p);
|
||||
|
||||
LogStreamHandle(Info, aot, package) st;
|
||||
if (st.is_enabled()) {
|
||||
st.print("archived ");
|
||||
p->print(&st);
|
||||
}
|
||||
}
|
||||
};
|
||||
_table.iterate_all(grab);
|
||||
@ -311,18 +278,8 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
|
||||
// Always allocate in the same order to produce deterministic archive.
|
||||
QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
|
||||
}
|
||||
for (int i = 0; i < n; i++) {
|
||||
archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
|
||||
ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
|
||||
}
|
||||
return archived_packages;
|
||||
}
|
||||
|
||||
void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
|
||||
for (int i = 0; i < archived_packages->length(); i++) {
|
||||
PackageEntry* archived_entry = archived_packages->at(i);
|
||||
archived_entry->init_as_archived_entry();
|
||||
}
|
||||
return archived_packages;
|
||||
}
|
||||
|
||||
void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,7 +25,9 @@
|
||||
#ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
|
||||
#define SHARE_CLASSFILE_PACKAGEENTRY_HPP
|
||||
|
||||
#include "cds/aotGrowableArray.hpp"
|
||||
#include "classfile/moduleEntry.hpp"
|
||||
#include "memory/metaspaceClosureType.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "oops/symbolHandle.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
@ -114,7 +116,7 @@ private:
|
||||
bool _must_walk_exports;
|
||||
// Contains list of modules this package is qualifiedly exported to. Access
|
||||
// to this list is protected by the Module_lock.
|
||||
GrowableArray<ModuleEntry*>* _qualified_exports;
|
||||
AOTGrowableArray<ModuleEntry*>* _qualified_exports;
|
||||
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
|
||||
|
||||
// Initial size of a package entry's list of qualified exports.
|
||||
@ -205,14 +207,24 @@ public:
|
||||
void purge_qualified_exports();
|
||||
void delete_qualified_exports();
|
||||
|
||||
void pack_qualified_exports(); // used by AOT
|
||||
|
||||
// methods required by MetaspaceClosure
|
||||
void metaspace_pointers_do(MetaspaceClosure* it);
|
||||
int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
|
||||
MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
|
||||
static bool is_read_only_by_default() { return false; }
|
||||
|
||||
void print(outputStream* st = tty);
|
||||
|
||||
char* name_as_C_string() const {
|
||||
assert(_name != nullptr, "name can't be null");
|
||||
return name()->as_C_string();
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
bool should_be_archived() const;
|
||||
void iterate_symbols(MetaspaceClosure* closure);
|
||||
PackageEntry* allocate_archived_entry() const;
|
||||
void init_as_archived_entry();
|
||||
static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
|
||||
void remove_unshareable_info();
|
||||
void load_from_archive();
|
||||
#endif
|
||||
|
||||
@ -271,9 +283,7 @@ public:
|
||||
void print(outputStream* st = tty);
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
void iterate_symbols(MetaspaceClosure* closure);
|
||||
Array<PackageEntry*>* allocate_archived_entries();
|
||||
void init_archived_entries(Array<PackageEntry*>* archived_packages);
|
||||
Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
|
||||
void load_archived_entries(Array<PackageEntry*>* archived_packages);
|
||||
#endif
|
||||
};
|
||||
|
||||
@ -245,10 +245,6 @@ class SerializeClosure;
|
||||
\
|
||||
/* Concurrency support */ \
|
||||
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
|
||||
template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
|
||||
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
|
||||
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
|
||||
template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
|
||||
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
|
||||
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
|
||||
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
|
||||
@ -302,6 +298,7 @@ class SerializeClosure;
|
||||
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
|
||||
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
|
||||
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
|
||||
template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
|
||||
\
|
||||
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
|
||||
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \
|
||||
|
||||
@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline void G1BarrierSet::write_ref_field_post(T* field) {
|
||||
volatile CardValue* byte = _card_table->byte_for(field);
|
||||
// Make sure that the card table reference is read only once. Otherwise the compiler
|
||||
// might reload that value in the two accesses below, that could cause writes to
|
||||
// the wrong card table.
|
||||
CardTable* card_table = AtomicAccess::load(&_card_table);
|
||||
CardValue* byte = card_table->byte_for(field);
|
||||
if (*byte == G1CardTable::clean_card_val()) {
|
||||
*byte = G1CardTable::dirty_card_val();
|
||||
}
|
||||
|
||||
@ -29,7 +29,6 @@
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/shared/gcTraceTime.inline.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
|
||||
void G1CardSetCoarsenStats::reset() {
|
||||
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
|
||||
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
|
||||
_coarsen_from[i] = 0;
|
||||
_coarsen_collision[i] = 0;
|
||||
_coarsen_from[i].store_relaxed(0);
|
||||
_coarsen_collision[i].store_relaxed(0);
|
||||
}
|
||||
}
|
||||
|
||||
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
|
||||
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
|
||||
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
|
||||
_coarsen_from[i] = other._coarsen_from[i];
|
||||
_coarsen_collision[i] = other._coarsen_collision[i];
|
||||
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
|
||||
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
|
||||
}
|
||||
}
|
||||
|
||||
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
|
||||
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
|
||||
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
|
||||
_coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
|
||||
_coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
|
||||
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
|
||||
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
|
||||
}
|
||||
}
|
||||
|
||||
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
|
||||
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
|
||||
AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
|
||||
_coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
|
||||
if (collision) {
|
||||
AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
|
||||
_coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
|
||||
"Inline->AoC %zu (%zu) "
|
||||
"AoC->BitMap %zu (%zu) "
|
||||
"BitMap->Full %zu (%zu) ",
|
||||
_coarsen_from[0], _coarsen_collision[0],
|
||||
_coarsen_from[1], _coarsen_collision[1],
|
||||
_coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
|
||||
_coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
|
||||
// There is no BitMap at the first level so we can't .
|
||||
_coarsen_from[3], _coarsen_collision[3],
|
||||
_coarsen_from[4], _coarsen_collision[4],
|
||||
_coarsen_from[5], _coarsen_collision[5],
|
||||
_coarsen_from[6], _coarsen_collision[6]
|
||||
_coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
|
||||
_coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
|
||||
_coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
|
||||
_coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
|
||||
);
|
||||
}
|
||||
|
||||
@ -248,7 +247,7 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
|
||||
// the per region cardsets.
|
||||
const static uint GroupBucketClaimSize = 4;
|
||||
// Did we insert at least one card in the table?
|
||||
bool volatile _inserted_card;
|
||||
Atomic<bool> _inserted_card;
|
||||
|
||||
G1CardSetMemoryManager* _mm;
|
||||
CardSetHash _table;
|
||||
@ -311,10 +310,10 @@ public:
|
||||
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
|
||||
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
|
||||
|
||||
if (!_inserted_card && inserted) {
|
||||
if (!_inserted_card.load_relaxed() && inserted) {
|
||||
// It does not matter to us who is setting the flag so a regular atomic store
|
||||
// is sufficient.
|
||||
AtomicAccess::store(&_inserted_card, true);
|
||||
_inserted_card.store_relaxed(true);
|
||||
}
|
||||
|
||||
return found.value();
|
||||
@ -343,9 +342,9 @@ public:
|
||||
}
|
||||
|
||||
void reset() {
|
||||
if (AtomicAccess::load(&_inserted_card)) {
|
||||
if (_inserted_card.load_relaxed()) {
|
||||
_table.unsafe_reset(InitialLogTableSize);
|
||||
AtomicAccess::store(&_inserted_card, false);
|
||||
_inserted_card.store_relaxed(false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
|
||||
_mm->free(container_type_to_mem_object_type(type), value);
|
||||
}
|
||||
|
||||
G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
|
||||
G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
|
||||
// Update reference counts under RCU critical section to avoid a
|
||||
// use-after-cleapup bug where we increment a reference count for
|
||||
// an object whose memory has already been cleaned up and reused.
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
while (true) {
|
||||
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
|
||||
ContainerPtr container = AtomicAccess::load_acquire(container_addr);
|
||||
ContainerPtr container = container_addr->load_acquire();
|
||||
uint cs_type = container_type(container);
|
||||
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
|
||||
return container;
|
||||
@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
|
||||
G1CardSet* _card_set;
|
||||
using ContainerPtr = G1CardSet::ContainerPtr;
|
||||
|
||||
void coarsen_to_full(ContainerPtr* container_addr) {
|
||||
void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
|
||||
while (true) {
|
||||
ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
|
||||
ContainerPtr cur_container = container_addr->load_acquire();
|
||||
uint cs_type = G1CardSet::container_type(cur_container);
|
||||
if (cur_container == G1CardSet::FullCardSet) {
|
||||
return;
|
||||
}
|
||||
|
||||
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
|
||||
ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
|
||||
|
||||
if (old_value == cur_container) {
|
||||
_card_set->release_and_maybe_free_container(cur_container);
|
||||
@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
|
||||
public:
|
||||
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
|
||||
|
||||
void operator ()(ContainerPtr* container_addr) {
|
||||
void operator ()(Atomic<ContainerPtr>* container_addr) {
|
||||
coarsen_to_full(container_addr);
|
||||
}
|
||||
};
|
||||
@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
|
||||
ContainerPtr container;
|
||||
|
||||
uint bucket = _config->howl_bucket_index(card_in_region);
|
||||
ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
|
||||
Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
|
||||
|
||||
while (true) {
|
||||
if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
|
||||
if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
|
||||
return Overflow;
|
||||
}
|
||||
|
||||
@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
|
||||
}
|
||||
|
||||
if (increment_total && add_result == Added) {
|
||||
AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
|
||||
howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
|
||||
}
|
||||
|
||||
if (to_transfer != nullptr) {
|
||||
@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
|
||||
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
|
||||
}
|
||||
|
||||
G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
|
||||
G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
|
||||
G1CardSetInlinePtr value(container_addr, container);
|
||||
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
|
||||
}
|
||||
@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
|
||||
return new_container;
|
||||
}
|
||||
|
||||
bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
|
||||
bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
|
||||
ContainerPtr cur_container,
|
||||
uint card_in_region,
|
||||
bool within_howl) {
|
||||
@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
|
||||
ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
|
||||
if (old_value == cur_container) {
|
||||
// Success. Indicate that the cards from the current card set must be transferred
|
||||
// by this caller.
|
||||
@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
|
||||
assert(container_type(source_container) == ContainerHowl, "must be");
|
||||
// Need to correct for that the Full remembered set occupies more cards than the
|
||||
// AoCS before.
|
||||
AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
|
||||
_num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
|
||||
diff -= 1;
|
||||
|
||||
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
|
||||
AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
|
||||
howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
|
||||
|
||||
G1CardSetHashTableValue* table_entry = get_container(card_region);
|
||||
assert(table_entry != nullptr, "Table entry not found for transferred cards");
|
||||
|
||||
AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
|
||||
table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
|
||||
|
||||
AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
|
||||
_num_occupied.add_then_fetch(diff, memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
|
||||
G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
|
||||
ContainerPtr container,
|
||||
uint card_region,
|
||||
uint card_in_region,
|
||||
@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
|
||||
}
|
||||
|
||||
if (increment_total && add_result == Added) {
|
||||
AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
|
||||
AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
|
||||
table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
|
||||
_num_occupied.add_then_fetch(1u, memory_order_relaxed);
|
||||
}
|
||||
if (should_grow_table) {
|
||||
_table->grow();
|
||||
@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ContainerPtr container = table_entry->_container;
|
||||
ContainerPtr container = table_entry->_container.load_relaxed();
|
||||
if (container == FullCardSet) {
|
||||
// contains_card() is not a performance critical method so we do not hide that
|
||||
// case in the switch below.
|
||||
@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
|
||||
return;
|
||||
}
|
||||
|
||||
ContainerPtr container = table_entry->_container;
|
||||
ContainerPtr container = table_entry->_container.load_relaxed();
|
||||
if (container == FullCardSet) {
|
||||
st->print("FULL card set)");
|
||||
return;
|
||||
@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
|
||||
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
|
||||
auto do_value =
|
||||
[&] (G1CardSetHashTableValue* value) {
|
||||
cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
|
||||
cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
|
||||
return true;
|
||||
};
|
||||
|
||||
@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
|
||||
}
|
||||
|
||||
bool G1CardSet::is_empty() const {
|
||||
return _num_occupied == 0;
|
||||
return _num_occupied.load_relaxed() == 0;
|
||||
}
|
||||
|
||||
size_t G1CardSet::occupied() const {
|
||||
return _num_occupied;
|
||||
return _num_occupied.load_relaxed();
|
||||
}
|
||||
|
||||
size_t G1CardSet::num_containers() {
|
||||
@ -1051,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
|
||||
|
||||
void G1CardSet::clear() {
|
||||
_table->reset();
|
||||
_num_occupied = 0;
|
||||
_num_occupied.store_relaxed(0);
|
||||
_mm->flush();
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/concurrentHashTable.hpp"
|
||||
|
||||
class G1CardSetAllocOptions;
|
||||
@ -154,8 +155,8 @@ public:
|
||||
|
||||
private:
|
||||
// Indices are "from" indices.
|
||||
size_t _coarsen_from[NumCoarsenCategories];
|
||||
size_t _coarsen_collision[NumCoarsenCategories];
|
||||
Atomic<size_t> _coarsen_from[NumCoarsenCategories];
|
||||
Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
|
||||
|
||||
public:
|
||||
G1CardSetCoarsenStats() { reset(); }
|
||||
@ -271,11 +272,11 @@ private:
|
||||
|
||||
// Total number of cards in this card set. This is a best-effort value, i.e. there may
|
||||
// be (slightly) more cards in the card set than this value in reality.
|
||||
size_t _num_occupied;
|
||||
Atomic<size_t> _num_occupied;
|
||||
|
||||
ContainerPtr make_container_ptr(void* value, uintptr_t type);
|
||||
|
||||
ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
|
||||
ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
|
||||
// Returns true if the card set container should be released
|
||||
bool release_container(ContainerPtr container);
|
||||
// Release card set and free if needed.
|
||||
@ -288,7 +289,7 @@ private:
|
||||
// coarsen_container does not transfer cards from cur_container
|
||||
// to the new container. Transfer is achieved by transfer_cards.
|
||||
// Returns true if this was the thread that coarsened the container (and added the card).
|
||||
bool coarsen_container(ContainerPtr volatile* container_addr,
|
||||
bool coarsen_container(Atomic<ContainerPtr>* container_addr,
|
||||
ContainerPtr cur_container,
|
||||
uint card_in_region, bool within_howl = false);
|
||||
|
||||
@ -300,9 +301,9 @@ private:
|
||||
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
|
||||
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
|
||||
|
||||
G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
|
||||
G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
|
||||
|
||||
G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
|
||||
G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
|
||||
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
|
||||
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
|
||||
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
|
||||
@ -366,7 +367,6 @@ public:
|
||||
|
||||
size_t num_containers();
|
||||
|
||||
static G1CardSetCoarsenStats coarsen_stats();
|
||||
static void print_coarsen_stats(outputStream* out);
|
||||
|
||||
// Returns size of the actual remembered set containers in bytes.
|
||||
@ -412,8 +412,15 @@ public:
|
||||
using ContainerPtr = G1CardSet::ContainerPtr;
|
||||
|
||||
const uint _region_idx;
|
||||
uint volatile _num_occupied;
|
||||
ContainerPtr volatile _container;
|
||||
Atomic<uint> _num_occupied;
|
||||
Atomic<ContainerPtr> _container;
|
||||
|
||||
// Copy constructor needed for use in ConcurrentHashTable.
|
||||
G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
|
||||
_region_idx(other._region_idx),
|
||||
_num_occupied(other._num_occupied.load_relaxed()),
|
||||
_container(other._container.load_relaxed())
|
||||
{ }
|
||||
|
||||
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
|
||||
};
|
||||
|
||||
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@

#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"

@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {

using ContainerPtr = G1CardSet::ContainerPtr;

ContainerPtr volatile * _value_addr;
Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value;

static const uint SizeFieldLen = 3;
@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}

G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}

@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
uintptr_t _ref_count;
Atomic<uintptr_t> _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }

uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
uintptr_t refcount() const { return _ref_count.load_acquire(); }

bool try_increment_refcount();

@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
EntryCountType volatile _num_entries;
Atomic<EntryCountType> _num_entries;
// VLA implementation.
EntryDataType _data[1];

@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;

class G1CardSetArrayLocker : public StackObj {
EntryCountType volatile* _num_entries_addr;
Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries;
public:
G1CardSetArrayLocker(EntryCountType volatile* value);
G1CardSetArrayLocker(Atomic<EntryCountType>* value);

EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@ -192,7 +192,7 @@ private:
}

~G1CardSetArrayLocker() {
AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
_num_entries_addr->release_store(_local_num_entries);
}
};

@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);

size_t num_entries() const { return _num_entries & EntryMask; }
size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }

static size_t header_size_in_bytes();

@ -223,7 +223,7 @@ public:
};

class G1CardSetBitMap : public G1CardSetContainer {
size_t _num_bits_set;
Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1];

public:
@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}

uint num_bits_set() const { return (uint)_num_bits_set; }
uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }

template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
EntryCountType volatile _num_entries;
Atomic<EntryCountType> _num_entries;
private:
// VLA implementation.
ContainerPtr _buckets[1];
Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point.

// Iterates over the given ContainerPtr with at index in this Howl card set,
@ -268,14 +268,14 @@ private:

ContainerPtr at(EntryCountType index) const;

ContainerPtr const* buckets() const;
Atomic<ContainerPtr> const* buckets() const;

public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);

ContainerPtr const* container_addr(EntryCountType index) const;
Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;

ContainerPtr* container_addr(EntryCountType index);
Atomic<ContainerPtr>* container_addr(EntryCountType index);

bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor

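The header diff above swaps plain volatile members and AtomicAccess:: free functions for the Atomic<T> wrapper, whose member functions (load_relaxed, load_acquire, store_relaxed, release_store, compare_exchange, add_then_fetch) carry the memory ordering. A minimal stand-alone sketch of that member-function style, using std::atomic in place of HotSpot's Atomic<T> and with invented names:

// Illustration only: std::atomic stands in for HotSpot's Atomic<T>; the field
// and accessor names are invented for this sketch.
#include <atomic>
#include <cstdint>

struct CardCounterSketch {
  std::atomic<uint32_t> _num_occupied{0};

  // Relaxed read, analogous to _num_occupied.load_relaxed() in the diff.
  uint32_t num_occupied_relaxed() const {
    return _num_occupied.load(std::memory_order_relaxed);
  }

  // Acquire read, analogous to _ref_count.load_acquire() in the diff.
  uint32_t num_occupied_acquire() const {
    return _num_occupied.load(std::memory_order_acquire);
  }
};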
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}

uintptr_t new_value = old_value + 2;
uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) {
return true;
}
@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
return AtomicAccess::sub(&_ref_count, 2u);
return _ref_count.sub_then_fetch(2u);
}

inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}

inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
while (true) {
EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
num_entries,
(EntryCountType)(num_entries | LockBitMask));
EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
(EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
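The G1CardSetArrayLocker constructor above spins until it can set a reserved lock bit in the entry counter with a compare-exchange; the destructor later publishes the updated count with a release store. A stand-alone sketch of that lock-bit pattern, using std::atomic rather than HotSpot's Atomic<T> and SpinYield (the bit layout and names here are assumptions of the sketch):

// Sketch only: std::atomic in place of Atomic<EntryCountType>; no SpinYield backoff.
#include <atomic>
#include <cstdint>

using EntryCountType = uint32_t;
constexpr EntryCountType LockBitMask = EntryCountType(1) << 31; // assumed lock bit
constexpr EntryCountType EntryMask = LockBitMask - 1;

class ArrayLockerSketch {
  std::atomic<EntryCountType>* _num_entries_addr;
  EntryCountType _local_num_entries;
public:
  explicit ArrayLockerSketch(std::atomic<EntryCountType>* addr) : _num_entries_addr(addr) {
    EntryCountType num_entries = addr->load(std::memory_order_relaxed) & EntryMask;
    while (true) {
      EntryCountType expected = num_entries;            // unlocked value we hope to see
      if (addr->compare_exchange_strong(expected, num_entries | LockBitMask)) {
        _local_num_entries = num_entries;               // locked; remember the count
        return;
      }
      num_entries = expected & EntryMask;               // lost the race; retry with new count
    }
  }
  void inc_num_entries() { ++_local_num_entries; }
  ~ArrayLockerSketch() {
    // Publish the possibly updated count and clear the lock bit in one release store.
    _num_entries_addr->store(_local_num_entries, std::memory_order_release);
  }
};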
@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}

inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition");
assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index;
}

@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}

inline bool G1CardSetArray::contains(uint card_idx) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;

for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {

template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)

inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
if (_num_bits_set >= threshold) {
if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
_num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added;
}
return Found;
@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}

inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition");
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index;
}

inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}

inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
return *container_addr(index);
return (*container_addr(index)).load_relaxed();
}

inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
return reinterpret_cast<ContainerPtr const*>(ptr);
return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
}

inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
*container_addr(i) = G1CardSetInlinePtr();
container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf

inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
ContainerPtr* array_entry = container_addr(bucket);
ContainerPtr container = AtomicAccess::load_acquire(array_entry);
Atomic<ContainerPtr>* array_entry = container_addr(bucket);
ContainerPtr container = array_entry->load_acquire();

switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {

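The try_increment_refcount hunk above is the classic compare-exchange retry loop: on failure, reload the observed value and try again, giving up once the value shows the container is no longer valid. A stand-alone sketch of that idiom with std::atomic (the exact validity checks in G1CardSetContainer are simplified here):

// Sketch only: increments a tagged refcount by 2 while the low "valid" bit is set.
#include <atomic>
#include <cstdint>

bool try_increment_refcount_sketch(std::atomic<uintptr_t>& ref_count) {
  uintptr_t old_value = ref_count.load(std::memory_order_acquire);
  while (old_value >= 3 && (old_value & 0x1) != 0) {   // still a live container
    uintptr_t new_value = old_value + 2;               // the count lives above the tag bit
    if (ref_count.compare_exchange_strong(old_value, new_value)) {
      return true;                                     // reference taken
    }
    // compare_exchange_strong reloaded old_value on failure; loop and retry.
  }
  return false;                                        // being reclaimed; do not use
}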
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"

G1CardSetAllocator::G1CardSetAllocator(const char* name,

@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"

@ -203,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer;

uint volatile _num_regions_added;
Atomic<uint> _num_regions_added;

G1BuildCandidateArray _result;

void update_totals(uint num_regions) {
if (num_regions > 0) {
AtomicAccess::add(&_num_regions_added, num_regions);
_num_regions_added.add_then_fetch(num_regions);
}
}

@ -221,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();

uint num_candidates = AtomicAccess::load(&_num_regions_added);
uint num_candidates = _num_regions_added.load_relaxed();

uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@ -254,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);

AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
_num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
}

public:
@ -275,7 +275,7 @@ public:
_result.sort_by_gc_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
_num_regions_added);
_num_regions_added.load_relaxed());
}
};

@ -291,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand();
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
elem->next = *list;
*list = elem;
void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
elem->next = list->load_relaxed();
list->store_relaxed(elem);
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@ -307,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
TaskQueueEntryChunk* result = *list;
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) {
*list = (*list)->next;
list->store_relaxed(list->load_relaxed()->next);
}
return result;
}
@ -364,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {

void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
_chunk_list = nullptr;
_free_list = nullptr;
_chunk_list.store_relaxed(nullptr);
_free_list.store_relaxed(nullptr);
_chunk_allocator.reset();
}

@ -210,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator;

char _pad0[DEFAULT_PADDING_SIZE];
TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];

// Atomically add the given chunk to the list.
void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);

void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@ -252,7 +252,7 @@ private:

// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == nullptr; }
bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }

size_t capacity() const { return _chunk_allocator.capacity(); }

@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {

size_t num_chunks = 0;

TaskQueueEntryChunk* cur = _chunk_list;
TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"

struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);

AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
_total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);

// Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/shared/workerThread.hpp"
#include "runtime/atomic.hpp"

class G1CollectedHeap;
class G1ConcurrentMark;
@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1ConcurrentMark* _cm;
G1HeapRegionClaimer _hrclaimer;

uint volatile _total_selected_for_rebuild;
Atomic<uint> _total_selected_for_rebuild;

// Reclaimed empty regions
G1FreeRegionList _cleanup_list;
@ -57,7 +58,9 @@ public:

void work(uint worker_id) override;

uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
uint total_selected_for_rebuild() const {
return _total_selected_for_rebuild.load_relaxed();
}

static uint desired_num_workers(uint num_regions);
};

@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,7 @@
 *
 */

#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/timer.hpp"

@ -39,19 +39,27 @@ G1ConcurrentRefineStats::G1ConcurrentRefineStats() :
{}

void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
AtomicAccess::add(&_sweep_duration, other->_sweep_duration, memory_order_relaxed);
AtomicAccess::add(&_yield_during_sweep_duration, other->_yield_during_sweep_duration, memory_order_relaxed);
_sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
_yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);

AtomicAccess::add(&_cards_scanned, other->_cards_scanned, memory_order_relaxed);
AtomicAccess::add(&_cards_clean, other->_cards_clean, memory_order_relaxed);
AtomicAccess::add(&_cards_not_parsable, other->_cards_not_parsable, memory_order_relaxed);
AtomicAccess::add(&_cards_already_refer_to_cset, other->_cards_already_refer_to_cset, memory_order_relaxed);
AtomicAccess::add(&_cards_refer_to_cset, other->_cards_refer_to_cset, memory_order_relaxed);
AtomicAccess::add(&_cards_no_cross_region, other->_cards_no_cross_region, memory_order_relaxed);
_cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
_cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
_cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
_cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
_cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
_cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);

AtomicAccess::add(&_refine_duration, other->_refine_duration, memory_order_relaxed);
_refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
}

void G1ConcurrentRefineStats::reset() {
*this = G1ConcurrentRefineStats();
_sweep_duration.store_relaxed(0);
_yield_during_sweep_duration.store_relaxed(0);
_cards_scanned.store_relaxed(0);
_cards_clean.store_relaxed(0);
_cards_not_parsable.store_relaxed(0);
_cards_already_refer_to_cset.store_relaxed(0);
_cards_refer_to_cset.store_relaxed(0);
_cards_no_cross_region.store_relaxed(0);
_refine_duration.store_relaxed(0);
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -26,61 +26,61 @@
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

// Collection of statistics for concurrent refinement processing.
// Used for collecting per-thread statistics and for summaries over a
// collection of threads.
class G1ConcurrentRefineStats : public CHeapObj<mtGC> {
jlong _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them.
jlong _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
Atomic<jlong> _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them.
Atomic<jlong> _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).

size_t _cards_scanned; // Total number of cards scanned.
size_t _cards_clean; // Number of cards found clean.
size_t _cards_not_parsable; // Number of cards we could not parse and left unrefined.
size_t _cards_already_refer_to_cset;// Number of cards marked found to be already young.
size_t _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
size_t _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
Atomic<size_t> _cards_scanned; // Total number of cards scanned.
Atomic<size_t> _cards_clean; // Number of cards found clean.
Atomic<size_t> _cards_not_parsable; // Number of cards we could not parse and left unrefined.
Atomic<size_t> _cards_already_refer_to_cset;// Number of cards marked found to be already young.
Atomic<size_t> _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
Atomic<size_t> _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.

jlong _refine_duration; // Time spent during actual refinement.
Atomic<jlong> _refine_duration; // Time spent during actual refinement.

public:
G1ConcurrentRefineStats();

// Time spent performing sweeping the refinement table (includes actual refinement,
// but not yield time).
jlong sweep_duration() const { return _sweep_duration - _yield_during_sweep_duration; }
jlong yield_during_sweep_duration() const { return _yield_during_sweep_duration; }
jlong refine_duration() const { return _refine_duration; }
inline jlong sweep_duration() const;
inline jlong yield_during_sweep_duration() const;
inline jlong refine_duration() const;

// Number of refined cards.
size_t refined_cards() const { return cards_not_clean(); }
inline size_t refined_cards() const;

size_t cards_scanned() const { return _cards_scanned; }
size_t cards_clean() const { return _cards_clean; }
size_t cards_not_clean() const { return _cards_scanned - _cards_clean; }
size_t cards_not_parsable() const { return _cards_not_parsable; }
size_t cards_already_refer_to_cset() const { return _cards_already_refer_to_cset; }
size_t cards_refer_to_cset() const { return _cards_refer_to_cset; }
size_t cards_no_cross_region() const { return _cards_no_cross_region; }
inline size_t cards_scanned() const;
inline size_t cards_clean() const;
inline size_t cards_not_clean() const;
inline size_t cards_not_parsable() const;
inline size_t cards_already_refer_to_cset() const;
inline size_t cards_refer_to_cset() const;
inline size_t cards_no_cross_region() const;
// Number of cards that were marked dirty and in need of refinement. This includes cards recently
// found to refer to the collection set as they originally were dirty.
size_t cards_pending() const { return cards_not_clean() - _cards_already_refer_to_cset; }
inline size_t cards_pending() const;

size_t cards_to_cset() const { return _cards_already_refer_to_cset + _cards_refer_to_cset; }
inline size_t cards_to_cset() const;

void inc_sweep_time(jlong t) { _sweep_duration += t; }
void inc_yield_during_sweep_duration(jlong t) { _yield_during_sweep_duration += t; }
void inc_refine_duration(jlong t) { _refine_duration += t; }
inline void inc_sweep_time(jlong t);
inline void inc_yield_during_sweep_duration(jlong t);
inline void inc_refine_duration(jlong t);

void inc_cards_scanned(size_t increment) { _cards_scanned += increment; }
void inc_cards_clean(size_t increment) { _cards_clean += increment; }
void inc_cards_not_parsable() { _cards_not_parsable++; }
void inc_cards_already_refer_to_cset() { _cards_already_refer_to_cset++; }
void inc_cards_refer_to_cset() { _cards_refer_to_cset++; }
void inc_cards_no_cross_region() { _cards_no_cross_region++; }
inline void inc_cards_scanned(size_t increment);
inline void inc_cards_clean(size_t increment);
inline void inc_cards_not_parsable();
inline void inc_cards_already_refer_to_cset();
inline void inc_cards_refer_to_cset();
inline void inc_cards_no_cross_region();

void add_atomic(G1ConcurrentRefineStats* other);

src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp (new file, 118 lines)
@ -0,0 +1,118 @@
/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP

#include "gc/g1/g1ConcurrentRefineStats.hpp"

inline jlong G1ConcurrentRefineStats::sweep_duration() const {
return _sweep_duration.load_relaxed() - yield_during_sweep_duration();
}

inline jlong G1ConcurrentRefineStats::yield_during_sweep_duration() const {
return _yield_during_sweep_duration.load_relaxed();
}

inline jlong G1ConcurrentRefineStats::refine_duration() const {
return _refine_duration.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::refined_cards() const {
return cards_not_clean();
}

inline size_t G1ConcurrentRefineStats::cards_scanned() const {
return _cards_scanned.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_clean() const {
return _cards_clean.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_not_clean() const {
return cards_scanned() - cards_clean();
}

inline size_t G1ConcurrentRefineStats::cards_not_parsable() const {
return _cards_not_parsable.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_already_refer_to_cset() const {
return _cards_already_refer_to_cset.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_refer_to_cset() const {
return _cards_refer_to_cset.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_no_cross_region() const {
return _cards_no_cross_region.load_relaxed();
}

inline size_t G1ConcurrentRefineStats::cards_pending() const {
return cards_not_clean() - cards_already_refer_to_cset();
}

inline size_t G1ConcurrentRefineStats::cards_to_cset() const {
return cards_already_refer_to_cset() + cards_refer_to_cset();
}

inline void G1ConcurrentRefineStats::inc_sweep_time(jlong t) {
_sweep_duration.store_relaxed(_sweep_duration.load_relaxed() + t);
}

inline void G1ConcurrentRefineStats::inc_yield_during_sweep_duration(jlong t) {
_yield_during_sweep_duration.store_relaxed(yield_during_sweep_duration() + t);
}

inline void G1ConcurrentRefineStats::inc_refine_duration(jlong t) {
_refine_duration.store_relaxed(refine_duration() + t);
}

inline void G1ConcurrentRefineStats::inc_cards_scanned(size_t increment) {
_cards_scanned.store_relaxed(cards_scanned() + increment);
}

inline void G1ConcurrentRefineStats::inc_cards_clean(size_t increment) {
_cards_clean.store_relaxed(cards_clean() + increment);
}

inline void G1ConcurrentRefineStats::inc_cards_not_parsable() {
_cards_not_parsable.store_relaxed(cards_not_parsable() + 1);
}

inline void G1ConcurrentRefineStats::inc_cards_already_refer_to_cset() {
_cards_already_refer_to_cset.store_relaxed(cards_already_refer_to_cset() + 1);
}

inline void G1ConcurrentRefineStats::inc_cards_refer_to_cset() {
_cards_refer_to_cset.store_relaxed(cards_refer_to_cset() + 1);
}

inline void G1ConcurrentRefineStats::inc_cards_no_cross_region() {
_cards_no_cross_region.store_relaxed(cards_no_cross_region() + 1);
}

#endif // SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
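The new inline header above gives every counter a relaxed accessor, while add_atomic in g1ConcurrentRefineStats.cpp merges per-thread stats with add_then_fetch. The split reflects single-writer updates (a plain relaxed load plus store is enough) versus concurrent merging (a read-modify-write is needed). A rough stand-alone sketch of that split, using std::atomic with invented class and field names:

// Illustration only: mirrors the shape of G1ConcurrentRefineStats, not its API.
#include <atomic>
#include <cstddef>

struct RefineStatsSketch {
  std::atomic<size_t> _cards_scanned{0};
  std::atomic<size_t> _cards_clean{0};

  // Only the owning thread updates these, so a relaxed load+store suffices.
  void inc_cards_scanned(size_t n) {
    _cards_scanned.store(_cards_scanned.load(std::memory_order_relaxed) + n,
                         std::memory_order_relaxed);
  }

  // Several threads may merge into a shared summary, so use an atomic add.
  void add_atomic(const RefineStatsSketch& other) {
    _cards_scanned.fetch_add(other._cards_scanned.load(std::memory_order_relaxed),
                             std::memory_order_relaxed);
    _cards_clean.fetch_add(other._cards_clean.load(std::memory_order_relaxed),
                           std::memory_order_relaxed);
  }
};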
@ -24,6 +24,7 @@

#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"

class G1RefineRegionClosure : public G1HeapRegionClosure {

@ -25,10 +25,10 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP

#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/workerThread.hpp"

class G1CardTableClaimTable;
class G1ConcurrentRefineStats;

class G1ConcurrentRefineSweepTask : public WorkerTask {
G1CardTableClaimTable* _scan_state;

@ -26,7 +26,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"

@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP

#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,13 +22,24 @@
 *
 */

#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/globals.hpp"

void G1EvacStats::reset() {
PLABStats::reset();
_region_end_waste.store_relaxed(0);
_regions_filled.store_relaxed(0);
_num_plab_filled.store_relaxed(0);
_direct_allocated.store_relaxed(0);
_num_direct_allocated.store_relaxed(0);
_failure_used.store_relaxed(0);
_failure_waste.store_relaxed(0);
}

void G1EvacStats::log_plab_allocation() {
log_debug(gc, plab)("%s PLAB allocation: "
"allocated: %zuB, "
@ -51,13 +62,13 @@ void G1EvacStats::log_plab_allocation() {
"failure used: %zuB, "
"failure wasted: %zuB",
_description,
_region_end_waste * HeapWordSize,
_regions_filled,
_num_plab_filled,
_direct_allocated * HeapWordSize,
_num_direct_allocated,
_failure_used * HeapWordSize,
_failure_waste * HeapWordSize);
region_end_waste() * HeapWordSize,
regions_filled(),
num_plab_filled(),
direct_allocated() * HeapWordSize,
num_direct_allocated(),
failure_used() * HeapWordSize,
failure_waste() * HeapWordSize);
}

void G1EvacStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
@ -109,7 +120,7 @@ size_t G1EvacStats::compute_desired_plab_size() const {
// threads do not allocate anything but a few rather large objects. In this
// degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
// which is an okay reaction.
size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
size_t const used_for_waste_calculation = used() > region_end_waste() ? used() - region_end_waste() : 0;

size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
return (size_t)((double)total_waste_allowed / (100 - G1LastPLABAverageOccupancy));

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/shared/gcUtil.hpp"
#include "gc/shared/plab.hpp"
#include "runtime/atomic.hpp"

// Records various memory allocation statistics gathered during evacuation. All sizes
// are in HeapWords.
@ -36,30 +37,21 @@ class G1EvacStats : public PLABStats {
AdaptiveWeightedAverage
_net_plab_size_filter; // Integrator with decay

size_t _region_end_waste; // Number of words wasted due to skipping to the next region.
uint _regions_filled; // Number of regions filled completely.
size_t _num_plab_filled; // Number of PLABs filled and retired.
size_t _direct_allocated; // Number of words allocated directly into the regions.
size_t _num_direct_allocated; // Number of direct allocation attempts.
Atomic<size_t> _region_end_waste; // Number of words wasted due to skipping to the next region.
Atomic<uint> _regions_filled; // Number of regions filled completely.
Atomic<size_t> _num_plab_filled; // Number of PLABs filled and retired.
Atomic<size_t> _direct_allocated; // Number of words allocated directly into the regions.
Atomic<size_t> _num_direct_allocated; // Number of direct allocation attempts.

// Number of words in live objects remaining in regions that ultimately suffered an
// evacuation failure. This is used in the regions when the regions are made old regions.
size_t _failure_used;
Atomic<size_t> _failure_used;
// Number of words wasted in regions which failed evacuation. This is the sum of space
// for objects successfully copied out of the regions (now dead space) plus waste at the
// end of regions.
size_t _failure_waste;
Atomic<size_t> _failure_waste;

virtual void reset() {
PLABStats::reset();
_region_end_waste = 0;
_regions_filled = 0;
_num_plab_filled = 0;
_direct_allocated = 0;
_num_direct_allocated = 0;
_failure_used = 0;
_failure_waste = 0;
}
virtual void reset();

void log_plab_allocation();
void log_sizing(size_t calculated_words, size_t net_desired_words);
@ -77,16 +69,16 @@ public:
// Should be called at the end of a GC pause.
void adjust_desired_plab_size();

uint regions_filled() const { return _regions_filled; }
size_t num_plab_filled() const { return _num_plab_filled; }
size_t region_end_waste() const { return _region_end_waste; }
size_t direct_allocated() const { return _direct_allocated; }
size_t num_direct_allocated() const { return _num_direct_allocated; }
uint regions_filled() const;
size_t num_plab_filled() const;
size_t region_end_waste() const;
size_t direct_allocated() const;
size_t num_direct_allocated() const;

// Amount of space in heapwords used in the failing regions when an evacuation failure happens.
size_t failure_used() const { return _failure_used; }
size_t failure_used() const;
// Amount of space in heapwords wasted (unused) in the failing regions when an evacuation failure happens.
size_t failure_waste() const { return _failure_waste; }
size_t failure_waste() const;

inline void add_num_plab_filled(size_t value);
inline void add_direct_allocated(size_t value);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,28 +27,54 @@

#include "gc/g1/g1EvacStats.hpp"

#include "runtime/atomicAccess.hpp"
inline uint G1EvacStats::regions_filled() const {
return _regions_filled.load_relaxed();
}

inline size_t G1EvacStats::num_plab_filled() const {
return _num_plab_filled.load_relaxed();
}

inline size_t G1EvacStats::region_end_waste() const {
return _region_end_waste.load_relaxed();
}

inline size_t G1EvacStats::direct_allocated() const {
return _direct_allocated.load_relaxed();
}

inline size_t G1EvacStats::num_direct_allocated() const {
return _num_direct_allocated.load_relaxed();
}

inline size_t G1EvacStats::failure_used() const {
return _failure_used.load_relaxed();
}

inline size_t G1EvacStats::failure_waste() const {
return _failure_waste.load_relaxed();
}

inline void G1EvacStats::add_direct_allocated(size_t value) {
AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed);
_direct_allocated.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_num_plab_filled(size_t value) {
AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed);
_num_plab_filled.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_num_direct_allocated(size_t value) {
AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed);
_num_direct_allocated.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_region_end_waste(size_t value) {
AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed);
AtomicAccess::inc(&_regions_filled, memory_order_relaxed);
_region_end_waste.add_then_fetch(value, memory_order_relaxed);
_regions_filled.add_then_fetch(1u, memory_order_relaxed);
}

inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
AtomicAccess::add(&_failure_used, used, memory_order_relaxed);
AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed);
_failure_used.add_then_fetch(used, memory_order_relaxed);
_failure_waste.add_then_fetch(waste, memory_order_relaxed);
}

#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -116,8 +116,8 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_num_workers(calc_active_workers()),
_has_compaction_targets(false),
_has_humongous(false),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
_marking_task_queues(_num_workers),
_partial_array_state_manager(nullptr),
_preserved_marks_set(true),
_serial_compaction_point(this, nullptr),
_humongous_compaction_point(this, nullptr),
@ -134,32 +134,40 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

_live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_num_regions(), mtGC);
_compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_num_regions(), mtGC);
_compaction_tops = NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _heap->max_num_regions(), mtGC);
for (uint j = 0; j < heap->max_num_regions(); j++) {
_live_stats[j].clear();
_compaction_tops[j] = nullptr;
::new (&_compaction_tops[j]) Atomic<HeapWord*>{};
}

_partial_array_state_manager = new PartialArrayStateManager(_num_workers);

for (uint i = 0; i < _num_workers; i++) {
_markers[i] = new G1FullGCMarker(this, i, _live_stats);
_compaction_points[i] = new G1FullGCCompactionPoint(this, _preserved_marks_set.get(i));
_oop_queue_set.register_queue(i, marker(i)->oop_stack());
_array_queue_set.register_queue(i, marker(i)->objarray_stack());
_marking_task_queues.register_queue(i, marker(i)->task_queue());
}

_serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_region_attr_table.initialize(heap->reserved(), G1HeapRegion::GrainBytes);
}

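Because _compaction_tops becomes an array of Atomic<HeapWord*> carved out of raw C-heap storage, the constructor above placement-news each slot instead of assigning nullptr. A stand-alone sketch of that allocation pattern using std::atomic and malloc (the element type and count here are illustrative only, not the HotSpot types):

// Sketch only: placement-new of atomics in raw storage, then relaxed use.
#include <atomic>
#include <cstdlib>
#include <new>

int main() {
  const std::size_t num_regions = 8;  // assumed count for the sketch
  auto* tops = static_cast<std::atomic<void*>*>(
      std::malloc(num_regions * sizeof(std::atomic<void*>)));
  if (tops == nullptr) return 1;
  for (std::size_t j = 0; j < num_regions; j++) {
    ::new (&tops[j]) std::atomic<void*>(nullptr);  // construct in place, don't assign
  }
  tops[0].store(nullptr, std::memory_order_relaxed);
  std::free(tops);  // std::atomic<void*> is trivially destructible, so free directly
  return 0;
}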
PartialArrayStateManager* G1FullCollector::partial_array_state_manager() const {
return _partial_array_state_manager;
}

G1FullCollector::~G1FullCollector() {
for (uint i = 0; i < _num_workers; i++) {
delete _markers[i];
delete _compaction_points[i];
}

delete _partial_array_state_manager;

FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _compaction_tops);
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

@ -279,8 +287,8 @@ public:
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
G1FullKeepAliveClosure keep_alive(_collector.marker(index));
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
G1MarkStackClosure* complete_marking = _collector.marker(index)->stack_closure();
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_marking);
}
};

@ -302,7 +310,7 @@ void G1FullCollector::phase1_mark_live_objects() {
const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, _heap->workers(), pt);
scope()->tracer()->report_gc_reference_stats(stats);
pt.print_all_references();
assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");
assert(marker(0)->task_queue()->is_empty(), "Should be no oops on the stack");
}

{
@ -328,8 +336,7 @@ void G1FullCollector::phase1_mark_live_objects() {
scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
}
#if TASKQUEUE_STATS
oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
marking_task_queues()->print_and_reset_taskqueue_stats("Marking Task Queue");
#endif
}

@ -79,8 +79,8 @@ class G1FullCollector : StackObj {
bool _has_humongous;
G1FullGCMarker** _markers;
G1FullGCCompactionPoint** _compaction_points;
OopQueueSet _oop_queue_set;
ObjArrayTaskQueueSet _array_queue_set;
G1MarkTasksQueueSet _marking_task_queues;
PartialArrayStateManager* _partial_array_state_manager;
PreservedMarksSet _preserved_marks_set;
G1FullGCCompactionPoint _serial_compaction_point;
G1FullGCCompactionPoint _humongous_compaction_point;
@ -96,7 +96,7 @@ class G1FullCollector : StackObj {

G1FullGCHeapRegionAttr _region_attr_table;

HeapWord* volatile* _compaction_tops;
Atomic<HeapWord*>* _compaction_tops;

public:
G1FullCollector(G1CollectedHeap* heap,
@ -113,8 +113,7 @@ public:
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
G1MarkTasksQueueSet* marking_task_queues() { return &_marking_task_queues; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
G1FullGCCompactionPoint* humongous_compaction_point() { return &_humongous_compaction_point; }
@ -125,6 +124,8 @@ public:
return _live_stats[region_index].live_words();
}

PartialArrayStateManager* partial_array_state_manager() const;

void before_marking_update_attribute_table(G1HeapRegion* hr);

inline bool is_compacting(oop obj) const;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -63,11 +63,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx)
}

void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) {
AtomicAccess::store(&_compaction_tops[r->hrm_index()], value);
_compaction_tops[r->hrm_index()].store_relaxed(value);
}

HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const {
return AtomicAccess::load(&_compaction_tops[r->hrm_index()]);
return _compaction_tops[r->hrm_index()].load_relaxed();
}

void G1FullCollector::set_has_compaction_targets() {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
G1FullGCTask("G1 Parallel Marking Task", collector),
_root_processor(G1CollectedHeap::heap(), collector->workers()),
_terminator(collector->workers(), collector->array_queue_set()) {
_terminator(collector->workers(), collector->marking_task_queues()) {
}

void G1FullGCMarkTask::work(uint worker_id) {
@ -54,10 +54,9 @@ void G1FullGCMarkTask::work(uint worker_id) {
}

// Mark stack is populated, now process and drain it.
marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
marker->complete_marking(collector()->marking_task_queues(), &_terminator);

// This is the point where the entire marking should have completed.
assert(marker->oop_stack()->is_empty(), "Marking should have completed");
assert(marker->objarray_stack()->is_empty(), "Array marking should have completed");
assert(marker->task_queue()->is_empty(), "Marking should have completed");
log_task("Marking task", worker_id, start);
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/shared/partialArraySplitter.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/verifyOption.hpp"
@ -36,8 +38,8 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
_collector(collector),
_worker_id(worker_id),
_bitmap(collector->mark_bitmap()),
_oop_stack(),
_objarray_stack(),
_task_queue(),
_partial_array_splitter(collector->partial_array_state_manager(), collector->workers(), ObjArrayMarkingStride),
_mark_closure(worker_id, this, ClassLoaderData::_claim_stw_fullgc_mark, G1CollectedHeap::heap()->ref_processor_stw()),
_stack_closure(this),
_cld_closure(mark_closure(), ClassLoaderData::_claim_stw_fullgc_mark),
@ -47,24 +49,36 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
}

G1FullGCMarker::~G1FullGCMarker() {
assert(is_empty(), "Must be empty at this point");
assert(is_task_queue_empty(), "Must be empty at this point");
}

void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
ObjArrayTaskQueueSet* array_stacks,
void G1FullGCMarker::process_partial_array(PartialArrayState* state, bool stolen) {
// Access state before release by claim().
objArrayOop obj_array = objArrayOop(state->source());
PartialArraySplitter::Claim claim =
_partial_array_splitter.claim(state, task_queue(), stolen);
process_array_chunk(obj_array, claim._start, claim._end);
}

void G1FullGCMarker::start_partial_array_processing(objArrayOop obj) {
mark_closure()->do_klass(obj->klass());
// Don't push empty arrays to avoid unnecessary work.
size_t array_length = obj->length();
if (array_length > 0) {
size_t initial_chunk_size = _partial_array_splitter.start(task_queue(), obj, nullptr, array_length);
process_array_chunk(obj, 0, initial_chunk_size);
}
}

void G1FullGCMarker::complete_marking(G1ScannerTasksQueueSet* task_queues,
TaskTerminator* terminator) {
do {
follow_marking_stacks();
ObjArrayTask steal_array;
if (array_stacks->steal(_worker_id, steal_array)) {
follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index());
} else {
oop steal_oop;
if (oop_stacks->steal(_worker_id, steal_oop)) {
follow_object(steal_oop);
}
process_marking_stacks();
ScannerTask stolen_task;
if (task_queues->steal(_worker_id, stolen_task)) {
dispatch_task(stolen_task, true);
}
} while (!is_empty() || !terminator->offer_termination());
} while (!is_task_queue_empty() || !terminator->offer_termination());
}

void G1FullGCMarker::flush_mark_stats_cache() {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#include "gc/g1/g1FullGCOopClosures.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/shared/partialArraySplitter.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"
@ -38,16 +40,15 @@
#include "utilities/growableArray.hpp"
#include "utilities/stack.hpp"

typedef OverflowTaskQueue<oop, mtGC> OopQueue;
|
||||
typedef OverflowTaskQueue<ObjArrayTask, mtGC> ObjArrayTaskQueue;
|
||||
|
||||
typedef GenericTaskQueueSet<OopQueue, mtGC> OopQueueSet;
|
||||
typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
|
||||
|
||||
class G1CMBitMap;
|
||||
class G1FullCollector;
|
||||
class TaskTerminator;
|
||||
|
||||
typedef OverflowTaskQueue<ScannerTask, mtGC> G1MarkTasksQueue;
|
||||
typedef GenericTaskQueueSet<G1MarkTasksQueue, mtGC> G1MarkTasksQueueSet;
|
||||
|
||||
class G1FullGCMarker : public CHeapObj<mtGC> {
|
||||
G1FullCollector* _collector;
|
||||
|
||||
@ -56,56 +57,50 @@ class G1FullGCMarker : public CHeapObj<mtGC> {
|
||||
G1CMBitMap* _bitmap;
|
||||
|
||||
// Mark stack
|
||||
OopQueue _oop_stack;
|
||||
ObjArrayTaskQueue _objarray_stack;
|
||||
G1MarkTasksQueue _task_queue;
|
||||
PartialArraySplitter _partial_array_splitter;
|
||||
|
||||
// Marking closures
|
||||
G1MarkAndPushClosure _mark_closure;
|
||||
G1FollowStackClosure _stack_closure;
|
||||
G1MarkStackClosure _stack_closure;
|
||||
CLDToOopClosure _cld_closure;
|
||||
StringDedup::Requests _string_dedup_requests;
|
||||
|
||||
|
||||
G1RegionMarkStatsCache _mark_stats_cache;
|
||||
|
||||
inline bool is_empty();
|
||||
inline void push_objarray(oop obj, size_t index);
|
||||
inline bool is_task_queue_empty();
|
||||
inline bool mark_object(oop obj);
|
||||
|
||||
// Marking helpers
|
||||
inline void follow_object(oop obj);
|
||||
inline void follow_array(objArrayOop array);
|
||||
inline void follow_array_chunk(objArrayOop array, int index);
|
||||
inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
|
||||
inline void dispatch_task(const ScannerTask& task, bool stolen);
|
||||
// Start processing the given objArrayOop by first pushing its continuations and
|
||||
// then scanning the first chunk.
|
||||
void start_partial_array_processing(objArrayOop obj);
|
||||
// Process the given continuation.
|
||||
void process_partial_array(PartialArrayState* state, bool stolen);
|
||||
|
||||
inline void publish_and_drain_oop_tasks();
|
||||
// Try to publish all contents from the objArray task queue overflow stack to
|
||||
// the shared objArray stack.
|
||||
// Returns true and a valid task if there has not been enough space in the shared
|
||||
// objArray stack, otherwise returns false and the task is invalid.
|
||||
inline bool publish_or_pop_objarray_tasks(ObjArrayTask& task);
|
||||
|
||||
public:
|
||||
G1FullGCMarker(G1FullCollector* collector,
|
||||
uint worker_id,
|
||||
G1RegionMarkStats* mark_stats);
|
||||
~G1FullGCMarker();
|
||||
|
||||
// Stack getters
|
||||
OopQueue* oop_stack() { return &_oop_stack; }
|
||||
ObjArrayTaskQueue* objarray_stack() { return &_objarray_stack; }
|
||||
G1MarkTasksQueue* task_queue() { return &_task_queue; }
|
||||
|
||||
// Marking entry points
|
||||
template <class T> inline void mark_and_push(T* p);
|
||||
|
||||
inline void follow_marking_stacks();
|
||||
void complete_marking(OopQueueSet* oop_stacks,
|
||||
ObjArrayTaskQueueSet* array_stacks,
|
||||
inline void process_marking_stacks();
|
||||
void complete_marking(G1MarkTasksQueueSet* task_queues,
|
||||
TaskTerminator* terminator);
|
||||
|
||||
// Closure getters
|
||||
CLDToOopClosure* cld_closure() { return &_cld_closure; }
|
||||
G1MarkAndPushClosure* mark_closure() { return &_mark_closure; }
|
||||
G1FollowStackClosure* stack_closure() { return &_stack_closure; }
|
||||
G1MarkStackClosure* stack_closure() { return &_stack_closure; }
|
||||
|
||||
// Flush live bytes to regions
|
||||
void flush_mark_stats_cache();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,6 +42,7 @@
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline bool G1FullGCMarker::mark_object(oop obj) {
|
||||
@ -71,94 +72,55 @@ template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
|
||||
if (!CompressedOops::is_null(heap_oop)) {
|
||||
oop obj = CompressedOops::decode_not_null(heap_oop);
|
||||
if (mark_object(obj)) {
|
||||
_oop_stack.push(obj);
|
||||
_task_queue.push(ScannerTask(obj));
|
||||
}
|
||||
assert(_bitmap->is_marked(obj), "Must be marked");
|
||||
}
|
||||
}
|
||||
|
||||
inline bool G1FullGCMarker::is_empty() {
|
||||
return _oop_stack.is_empty() && _objarray_stack.is_empty();
|
||||
inline bool G1FullGCMarker::is_task_queue_empty() {
|
||||
return _task_queue.is_empty();
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::push_objarray(oop obj, size_t index) {
|
||||
ObjArrayTask task(obj, index);
|
||||
assert(task.is_valid(), "bad ObjArrayTask");
|
||||
_objarray_stack.push(task);
|
||||
inline void G1FullGCMarker::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
|
||||
obj->oop_iterate_elements_range(mark_closure(),
|
||||
checked_cast<int>(start),
|
||||
checked_cast<int>(end));
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::follow_array(objArrayOop array) {
|
||||
mark_closure()->do_klass(array->klass());
|
||||
// Don't push empty arrays to avoid unnecessary work.
|
||||
if (array->length() > 0) {
|
||||
push_objarray(array, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
|
||||
const int len = array->length();
|
||||
const int beg_index = index;
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
|
||||
const int end_index = beg_index + stride;
|
||||
|
||||
// Push the continuation first to allow more efficient work stealing.
|
||||
if (end_index < len) {
|
||||
push_objarray(array, end_index);
|
||||
}
|
||||
|
||||
array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::follow_object(oop obj) {
|
||||
assert(_bitmap->is_marked(obj), "should be marked");
|
||||
if (obj->is_objArray()) {
|
||||
// Handle object arrays explicitly to allow them to
|
||||
// be split into chunks if needed.
|
||||
follow_array((objArrayOop)obj);
|
||||
inline void G1FullGCMarker::dispatch_task(const ScannerTask& task, bool stolen) {
|
||||
if (task.is_partial_array_state()) {
|
||||
assert(_bitmap->is_marked(task.to_partial_array_state()->source()), "should be marked");
|
||||
process_partial_array(task.to_partial_array_state(), stolen);
|
||||
} else {
|
||||
obj->oop_iterate(mark_closure());
|
||||
oop obj = task.to_oop();
|
||||
assert(_bitmap->is_marked(obj), "should be marked");
|
||||
if (obj->is_objArray()) {
|
||||
// Handle object arrays explicitly to allow them to
|
||||
// be split into chunks if needed.
|
||||
start_partial_array_processing((objArrayOop)obj);
|
||||
} else {
|
||||
obj->oop_iterate(mark_closure());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::publish_and_drain_oop_tasks() {
|
||||
oop obj;
|
||||
while (_oop_stack.pop_overflow(obj)) {
|
||||
if (!_oop_stack.try_push_to_taskqueue(obj)) {
|
||||
assert(_bitmap->is_marked(obj), "must be marked");
|
||||
follow_object(obj);
|
||||
ScannerTask task;
|
||||
while (_task_queue.pop_overflow(task)) {
|
||||
if (!_task_queue.try_push_to_taskqueue(task)) {
|
||||
dispatch_task(task, false);
|
||||
}
|
||||
}
|
||||
while (_oop_stack.pop_local(obj)) {
|
||||
assert(_bitmap->is_marked(obj), "must be marked");
|
||||
follow_object(obj);
|
||||
while (_task_queue.pop_local(task)) {
|
||||
dispatch_task(task, false);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool G1FullGCMarker::publish_or_pop_objarray_tasks(ObjArrayTask& task) {
|
||||
// It is desirable to move as much as possible work from the overflow queue to
|
||||
// the shared queue as quickly as possible.
|
||||
while (_objarray_stack.pop_overflow(task)) {
|
||||
if (!_objarray_stack.try_push_to_taskqueue(task)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void G1FullGCMarker::follow_marking_stacks() {
|
||||
void G1FullGCMarker::process_marking_stacks() {
|
||||
do {
|
||||
// First, drain regular oop stack.
|
||||
publish_and_drain_oop_tasks();
|
||||
|
||||
// Then process ObjArrays one at a time to avoid marking stack bloat.
|
||||
ObjArrayTask task;
|
||||
if (publish_or_pop_objarray_tasks(task) ||
|
||||
_objarray_stack.pop_local(task)) {
|
||||
follow_array_chunk(objArrayOop(task.obj()), task.index());
|
||||
}
|
||||
} while (!is_empty());
|
||||
} while (!is_task_queue_empty());
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
|
||||
|
||||
@ -35,7 +35,7 @@
|
||||
G1IsAliveClosure::G1IsAliveClosure(G1FullCollector* collector) :
|
||||
G1IsAliveClosure(collector, collector->mark_bitmap()) { }
|
||||
|
||||
void G1FollowStackClosure::do_void() { _marker->follow_marking_stacks(); }
|
||||
void G1MarkStackClosure::do_void() { _marker->process_marking_stacks(); }
|
||||
|
||||
void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
|
||||
void G1FullKeepAliveClosure::do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
|
||||
@ -86,11 +86,11 @@ public:
|
||||
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
|
||||
};
|
||||
|
||||
class G1FollowStackClosure: public VoidClosure {
|
||||
class G1MarkStackClosure: public VoidClosure {
|
||||
G1FullGCMarker* _marker;
|
||||
|
||||
public:
|
||||
G1FollowStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
|
||||
G1MarkStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
|
||||
@ -32,7 +32,7 @@
|
||||
#include "gc/g1/g1ConcurrentMark.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefine.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefineStats.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1HeapRegion.inline.hpp"
|
||||
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -50,7 +50,7 @@
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -107,46 +107,48 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
|
||||
// Set of (unique) regions that can be added to concurrently.
|
||||
class G1DirtyRegions : public CHeapObj<mtGC> {
|
||||
uint* _buffer;
|
||||
uint _cur_idx;
|
||||
Atomic<uint> _cur_idx;
|
||||
size_t _max_reserved_regions;
|
||||
|
||||
bool* _contains;
|
||||
Atomic<bool>* _contains;
|
||||
|
||||
public:
|
||||
G1DirtyRegions(size_t max_reserved_regions) :
|
||||
_buffer(NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC)),
|
||||
_cur_idx(0),
|
||||
_max_reserved_regions(max_reserved_regions),
|
||||
_contains(NEW_C_HEAP_ARRAY(bool, max_reserved_regions, mtGC)) {
|
||||
_contains(NEW_C_HEAP_ARRAY(Atomic<bool>, max_reserved_regions, mtGC)) {
|
||||
|
||||
reset();
|
||||
}
|
||||
|
||||
~G1DirtyRegions() {
|
||||
FREE_C_HEAP_ARRAY(uint, _buffer);
|
||||
FREE_C_HEAP_ARRAY(bool, _contains);
|
||||
FREE_C_HEAP_ARRAY(Atomic<bool>, _contains);
|
||||
}
|
||||
|
||||
void reset() {
|
||||
_cur_idx = 0;
|
||||
::memset(_contains, false, _max_reserved_regions * sizeof(bool));
|
||||
_cur_idx.store_relaxed(0);
|
||||
for (uint i = 0; i < _max_reserved_regions; i++) {
|
||||
_contains[i].store_relaxed(false);
|
||||
}
|
||||
}
|
||||
|
||||
uint size() const { return _cur_idx; }
|
||||
uint size() const { return _cur_idx.load_relaxed(); }
|
||||
|
||||
uint at(uint idx) const {
|
||||
assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
|
||||
assert(idx < size(), "Index %u beyond valid regions", idx);
|
||||
return _buffer[idx];
|
||||
}
|
||||
|
||||
void add_dirty_region(uint region) {
|
||||
if (_contains[region]) {
|
||||
if (_contains[region].load_relaxed()) {
|
||||
return;
|
||||
}
|
||||
|
||||
bool marked_as_dirty = AtomicAccess::cmpxchg(&_contains[region], false, true) == false;
|
||||
bool marked_as_dirty = _contains[region].compare_set(false, true);
|
||||
if (marked_as_dirty) {
|
||||
uint allocated = AtomicAccess::fetch_then_add(&_cur_idx, 1u);
|
||||
uint allocated = _cur_idx.fetch_then_add(1u);
|
||||
_buffer[allocated] = region;
|
||||
}
|
||||
}
|
||||
@ -155,9 +157,11 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
|
||||
void merge(const G1DirtyRegions* other) {
|
||||
for (uint i = 0; i < other->size(); i++) {
|
||||
uint region = other->at(i);
|
||||
if (!_contains[region]) {
|
||||
_buffer[_cur_idx++] = region;
|
||||
_contains[region] = true;
|
||||
if (!_contains[region].load_relaxed()) {
|
||||
uint cur = _cur_idx.load_relaxed();
|
||||
_buffer[cur] = region;
|
||||
_cur_idx.store_relaxed(cur + 1);
|
||||
_contains[region].store_relaxed(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -173,7 +177,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
|
||||
class G1ClearCardTableTask : public G1AbstractSubTask {
|
||||
G1CollectedHeap* _g1h;
|
||||
G1DirtyRegions* _regions;
|
||||
uint volatile _cur_dirty_regions;
|
||||
Atomic<uint> _cur_dirty_regions;
|
||||
|
||||
G1RemSetScanState* _scan_state;
|
||||
|
||||
@ -210,8 +214,9 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
|
||||
void do_work(uint worker_id) override {
|
||||
const uint num_regions_per_worker = num_cards_per_worker / (uint)G1HeapRegion::CardsPerRegion;
|
||||
|
||||
while (_cur_dirty_regions < _regions->size()) {
|
||||
uint next = AtomicAccess::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker);
|
||||
uint cur = _cur_dirty_regions.load_relaxed();
|
||||
while (cur < _regions->size()) {
|
||||
uint next = _cur_dirty_regions.fetch_then_add(num_regions_per_worker);
|
||||
uint max = MIN2(next + num_regions_per_worker, _regions->size());
|
||||
|
||||
for (uint i = next; i < max; i++) {
|
||||
@ -226,6 +231,7 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
|
||||
// old regions use it for old->collection set candidates, so they should not be cleared
|
||||
// either.
|
||||
}
|
||||
cur = max;
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1115,7 +1121,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
|
||||
|
||||
bool _initial_evacuation;
|
||||
|
||||
volatile bool _fast_reclaim_handled;
|
||||
Atomic<bool> _fast_reclaim_handled;
|
||||
|
||||
public:
|
||||
G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
|
||||
@ -1143,8 +1149,8 @@ public:
|
||||
// 1. eager-reclaim candidates
|
||||
if (_initial_evacuation &&
|
||||
g1h->has_humongous_reclaim_candidates() &&
|
||||
!_fast_reclaim_handled &&
|
||||
!AtomicAccess::cmpxchg(&_fast_reclaim_handled, false, true)) {
|
||||
!_fast_reclaim_handled.load_relaxed() &&
|
||||
_fast_reclaim_handled.compare_set(false, true)) {
|
||||
|
||||
G1GCParPhaseTimesTracker subphase_x(p, G1GCPhaseTimes::MergeER, worker_id);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -58,6 +58,7 @@
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/threads.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
|
||||
@ -459,8 +460,8 @@ class G1PrepareEvacuationTask : public WorkerTask {
|
||||
|
||||
G1CollectedHeap* _g1h;
|
||||
G1HeapRegionClaimer _claimer;
|
||||
volatile uint _humongous_total;
|
||||
volatile uint _humongous_candidates;
|
||||
Atomic<uint> _humongous_total;
|
||||
Atomic<uint> _humongous_candidates;
|
||||
|
||||
G1MonotonicArenaMemoryStats _all_card_set_stats;
|
||||
|
||||
@ -481,19 +482,19 @@ public:
|
||||
}
|
||||
|
||||
void add_humongous_candidates(uint candidates) {
|
||||
AtomicAccess::add(&_humongous_candidates, candidates);
|
||||
_humongous_candidates.add_then_fetch(candidates);
|
||||
}
|
||||
|
||||
void add_humongous_total(uint total) {
|
||||
AtomicAccess::add(&_humongous_total, total);
|
||||
_humongous_total.add_then_fetch(total);
|
||||
}
|
||||
|
||||
uint humongous_candidates() {
|
||||
return _humongous_candidates;
|
||||
return _humongous_candidates.load_relaxed();
|
||||
}
|
||||
|
||||
uint humongous_total() {
|
||||
return _humongous_total;
|
||||
return _humongous_total.load_relaxed();
|
||||
}
|
||||
|
||||
const G1MonotonicArenaMemoryStats all_card_set_stats() const {
|
||||
@ -698,7 +699,7 @@ protected:
|
||||
virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
|
||||
|
||||
private:
|
||||
volatile bool _pinned_regions_recorded;
|
||||
Atomic<bool> _pinned_regions_recorded;
|
||||
|
||||
public:
|
||||
G1EvacuateRegionsBaseTask(const char* name,
|
||||
@ -722,7 +723,7 @@ public:
|
||||
G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
|
||||
pss->set_ref_discoverer(_g1h->ref_processor_stw());
|
||||
|
||||
if (!AtomicAccess::cmpxchg(&_pinned_regions_recorded, false, true)) {
|
||||
if (_pinned_regions_recorded.compare_set(false, true)) {
|
||||
record_pinned_regions(pss, worker_id);
|
||||
}
|
||||
scan_roots(pss, worker_id);
|
||||
|
||||
@ -46,6 +46,7 @@
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "runtime/threads.hpp"
|
||||
#include "runtime/threadSMR.hpp"
|
||||
@ -759,7 +760,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1
|
||||
const size_t* _surviving_young_words;
|
||||
uint _active_workers;
|
||||
G1EvacFailureRegions* _evac_failure_regions;
|
||||
volatile uint _num_retained_regions;
|
||||
Atomic<uint> _num_retained_regions;
|
||||
|
||||
FreeCSetStats* worker_stats(uint worker) {
|
||||
return &_worker_stats[worker];
|
||||
@ -794,7 +795,7 @@ public:
|
||||
virtual ~FreeCollectionSetTask() {
|
||||
Ticks serial_time = Ticks::now();
|
||||
|
||||
bool has_new_retained_regions = AtomicAccess::load(&_num_retained_regions) != 0;
|
||||
bool has_new_retained_regions = _num_retained_regions.load_relaxed() != 0;
|
||||
if (has_new_retained_regions) {
|
||||
G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
|
||||
candidates->sort_by_efficiency();
|
||||
@ -829,7 +830,7 @@ public:
|
||||
// Report per-region type timings.
|
||||
cl.report_timing();
|
||||
|
||||
AtomicAccess::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed);
|
||||
_num_retained_regions.add_then_fetch(cl.num_retained_regions(), memory_order_relaxed);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -23,7 +23,6 @@
|
||||
*/
|
||||
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefineStats.hpp"
|
||||
#include "gc/g1/g1RegionPinCache.inline.hpp"
|
||||
#include "gc/g1/g1ThreadLocalData.hpp"
|
||||
#include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
|
||||
|
||||
@ -31,7 +31,6 @@
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
template <bool concurrent, bool is_const>
|
||||
|
||||
@ -28,7 +28,6 @@
|
||||
#include "memory/arena.hpp"
|
||||
#include "nmt/memTag.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/orderAccess.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
@ -28,7 +28,6 @@
|
||||
#include "gc/shared/partialArrayTaskStepper.hpp"
|
||||
|
||||
#include "gc/shared/partialArrayState.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
|
||||
@ -25,7 +25,6 @@
|
||||
#include "gc/shared/taskqueue.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
@ -32,7 +32,6 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/orderAccess.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
@ -26,7 +26,6 @@
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
||||
@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
|
||||
|
||||
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
|
||||
|
||||
void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
|
||||
|
||||
// The logic for cset selection in adaptive is as follows:
|
||||
@ -124,6 +124,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
|
||||
cur_garbage = new_garbage;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ShenandoahAdaptiveHeuristics::record_cycle_start() {
|
||||
|
||||
@ -108,9 +108,9 @@ public:
|
||||
|
||||
virtual ~ShenandoahAdaptiveHeuristics();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
virtual void record_cycle_start() override;
|
||||
virtual void record_success_concurrent() override;
|
||||
|
||||
@ -39,15 +39,16 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
|
||||
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
|
||||
}
|
||||
|
||||
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
ShenandoahHeapRegion* r = data[idx].get_region();
|
||||
if (r->garbage() > 0) {
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool ShenandoahAggressiveHeuristics::should_start_gc() {
|
||||
|
||||
@ -35,9 +35,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
|
||||
public:
|
||||
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
|
||||
@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
|
||||
return ShenandoahHeuristics::should_start_gc();
|
||||
}
|
||||
|
||||
void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// Do not select too large CSet that would overflow the available free space
|
||||
size_t max_cset = actual_free * 3 / 4;
|
||||
|
||||
@ -97,4 +97,5 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -37,9 +37,9 @@ public:
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free);
|
||||
|
||||
virtual const char* name() { return "Compact"; }
|
||||
virtual bool is_diagnostic() { return false; }
|
||||
|
||||
@ -37,7 +37,7 @@ ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen
|
||||
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
|
||||
}
|
||||
|
||||
void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
assert(collection_set->is_empty(), "Must be empty");
|
||||
|
||||
auto heap = ShenandoahGenerationalHeap::heap();
|
||||
@ -168,16 +168,12 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
|
||||
byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
|
||||
|
||||
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
|
||||
|
||||
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
|
||||
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
|
||||
// Only young collections need to prime the collection set.
|
||||
if (_generation->is_young()) {
|
||||
heap->old_generation()->heuristics()->prime_collection_set(collection_set);
|
||||
}
|
||||
|
||||
size_t add_regions_to_old = 0;
|
||||
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
|
||||
// Call the subclasses to add young-gen regions into the collection set.
|
||||
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
}
|
||||
|
||||
if (collection_set->has_old_regions()) {
|
||||
@ -194,6 +190,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
|
||||
regular_regions_promoted_free,
|
||||
immediate_regions,
|
||||
immediate_garbage);
|
||||
return add_regions_to_old;
|
||||
}
|
||||
|
||||
|
||||
@ -210,13 +207,6 @@ size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_s
|
||||
assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age");
|
||||
// Entire region will be promoted, This region does not impact young-gen or old-gen evacuation reserve.
|
||||
// This region has been pre-selected and its impact on promotion reserve is already accounted for.
|
||||
|
||||
// r->used() is r->garbage() + r->get_live_data_bytes()
|
||||
// Since all live data in this region is being evacuated from young-gen, it is as if this memory
|
||||
// is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to
|
||||
// reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
|
||||
// within young-gen memory.
|
||||
|
||||
cur_young_garbage += r->garbage();
|
||||
cset->add_region(r);
|
||||
}
|
||||
|
||||
@ -44,7 +44,7 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
|
||||
public:
|
||||
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
|
||||
|
||||
void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
|
||||
size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
|
||||
protected:
|
||||
ShenandoahGeneration* _generation;
|
||||
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
*/
|
||||
|
||||
#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp"
|
||||
#include "gc/shenandoah/shenandoahAsserts.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
|
||||
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
|
||||
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
|
||||
@ -35,13 +36,14 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// Better select garbage-first regions
|
||||
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
|
||||
|
||||
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -49,94 +51,212 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
|
||||
const ShenandoahHeuristics::RegionData* data,
|
||||
size_t size, size_t actual_free,
|
||||
size_t cur_young_garbage) const {
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
auto heap = ShenandoahGenerationalHeap::heap();
|
||||
auto free_set = heap->free_set();
|
||||
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t capacity = heap->soft_max_capacity();
|
||||
|
||||
size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
|
||||
size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
|
||||
|
||||
size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
|
||||
size_t original_young_evac_reserve = young_evac_reserve;
|
||||
size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
|
||||
size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
|
||||
size_t young_cur_cset = 0;
|
||||
size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
|
||||
size_t old_cur_cset = 0;
|
||||
size_t old_promo_reserve = heap->old_generation()->get_promoted_reserve();
|
||||
|
||||
// Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
|
||||
// collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
|
||||
// than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
|
||||
// to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
|
||||
// to the mutator as it needs to be able to consume this memory during concurrent GC.
|
||||
|
||||
size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
|
||||
size_t unaffiliated_young_regions = free_set->collector_unaffiliated_regions();
|
||||
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
|
||||
size_t unaffiliated_old_regions = free_set->old_collector_unaffiliated_regions();
|
||||
size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
|
||||
|
||||
if (unaffiliated_young_memory > max_young_cset) {
|
||||
size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
|
||||
unaffiliated_young_memory -= unaffiliated_mutator_memory;
|
||||
unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
|
||||
unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
|
||||
// Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
|
||||
// be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
|
||||
// most garbage, regardless of whether these garbage-first regions reside in young or old generation.
|
||||
// Excess reserves will be transferred back to the mutator after collection set has been chosen. At the end
|
||||
// of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
|
||||
|
||||
// Truncate reserves to only target unaffiliated memory
|
||||
size_t shared_reserve_regions = 0;
|
||||
if (young_evac_reserve > unaffiliated_young_memory) {
|
||||
shared_reserve_regions += unaffiliated_young_regions;
|
||||
} else {
|
||||
size_t delta_regions = young_evac_reserve / region_size_bytes;
|
||||
shared_reserve_regions += delta_regions;
|
||||
}
|
||||
young_evac_reserve = 0;
|
||||
size_t total_old_reserve = old_evac_reserve + old_promo_reserve;
|
||||
if (total_old_reserve > unaffiliated_old_memory) {
|
||||
// Give all the unaffiliated memory to the shared reserves. Leave the rest for promo reserve.
|
||||
shared_reserve_regions += unaffiliated_old_regions;
|
||||
old_promo_reserve = total_old_reserve - unaffiliated_old_memory;
|
||||
} else {
|
||||
size_t delta_regions = old_evac_reserve / region_size_bytes;
|
||||
shared_reserve_regions += delta_regions;
|
||||
}
|
||||
old_evac_reserve = 0;
|
||||
assert(shared_reserve_regions <=
|
||||
(heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()),
|
||||
"simple math");
|
||||
|
||||
// We'll affiliate these unaffiliated regions with either old or young, depending on need.
|
||||
max_young_cset -= unaffiliated_young_memory;
|
||||
size_t shared_reserves = shared_reserve_regions * region_size_bytes;
|
||||
size_t committed_from_shared_reserves = 0;
|
||||
|
||||
// Keep track of how many regions we plan to transfer from young to old.
|
||||
size_t regions_transferred_to_old = 0;
|
||||
size_t promo_bytes = 0;
|
||||
size_t old_evac_bytes = 0;
|
||||
size_t young_evac_bytes = 0;
|
||||
|
||||
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
|
||||
size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste
|
||||
size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste
|
||||
size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste
|
||||
|
||||
// Of the memory reclaimed by GC, some of this will need to be reserved for the next GC collection. Use the current
|
||||
// young reserve as an approximation of the future Collector reserve requirement. Try to end with at least
|
||||
// (capacity * ShenandoahMinFreeThreshold) / 100 bytes available to the mutator.
|
||||
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve;
|
||||
size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
|
||||
|
||||
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
|
||||
"%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
|
||||
byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
|
||||
byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
|
||||
byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
|
||||
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
|
||||
size_t aged_regions_promoted = 0;
|
||||
size_t young_regions_evacuated = 0;
|
||||
size_t old_regions_evacuated = 0;
|
||||
|
||||
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s"
|
||||
", Actual Free: %zu%s.",
|
||||
byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves),
|
||||
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
|
||||
|
||||
size_t cur_garbage = cur_young_garbage;
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
ShenandoahHeapRegion* r = data[idx].get_region();
|
||||
assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC");
|
||||
bool add_region = false;
|
||||
if (r->is_old() || heap->is_tenurable(r)) {
|
||||
size_t new_cset = old_cur_cset + r->get_live_data_bytes();
|
||||
if ((r->garbage() > garbage_threshold)) {
|
||||
while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) {
|
||||
unaffiliated_young_regions--;
|
||||
regions_transferred_to_old++;
|
||||
max_old_cset += region_size_bytes / ShenandoahOldEvacWaste;
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t new_garbage = cur_garbage + region_garbage;
|
||||
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
|
||||
size_t live_bytes = r->get_live_data_bytes();
|
||||
if (add_regardless || (region_garbage >= garbage_threshold)) {
|
||||
if (r->is_old()) {
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste);
|
||||
size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption;
|
||||
size_t new_old_evac_reserve = old_evac_reserve;
|
||||
size_t proposed_old_region_expansion = 0;
|
||||
while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_old_evac_reserve += region_size_bytes;
|
||||
}
|
||||
}
|
||||
if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) {
|
||||
add_region = true;
|
||||
old_cur_cset = new_cset;
|
||||
}
|
||||
} else {
|
||||
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
|
||||
size_t new_cset = young_cur_cset + r->get_live_data_bytes();
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t new_garbage = cur_young_garbage + region_garbage;
|
||||
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
|
||||
|
||||
if (add_regardless || (r->garbage() > garbage_threshold)) {
|
||||
while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) {
|
||||
unaffiliated_young_regions--;
|
||||
max_young_cset += region_size_bytes / ShenandoahEvacWaste;
|
||||
// If this region has free memory and we choose to place it in the collection set, its free memory is no longer
|
||||
// available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve.
|
||||
size_t anticipated_loss_from_promo_reserve = r->free();
|
||||
size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve;
|
||||
size_t new_promo_reserve = old_promo_reserve;
|
||||
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_promo_reserve += region_size_bytes;
|
||||
}
|
||||
if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) {
|
||||
add_region = true;
|
||||
old_evac_reserve = new_old_evac_reserve;
|
||||
old_promo_reserve = new_promo_reserve;
|
||||
old_evac_bytes += live_bytes;
|
||||
consumed_by_old_evac = new_old_consumption;
|
||||
consumed_by_promo = new_promo_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
old_regions_evacuated++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
|
||||
}
|
||||
} else if (heap->is_tenurable(r)) {
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste);
|
||||
size_t new_promo_consumption = consumed_by_promo + anticipated_consumption;
|
||||
size_t new_promo_reserve = old_promo_reserve;
|
||||
size_t proposed_old_region_expansion = 0;
|
||||
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_old_region_expansion++;
|
||||
new_promo_reserve += region_size_bytes;
|
||||
}
|
||||
if (new_promo_consumption <= new_promo_reserve) {
|
||||
add_region = true;
|
||||
old_promo_reserve = new_promo_reserve;
|
||||
promo_bytes += live_bytes;
|
||||
consumed_by_promo = new_promo_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
aged_regions_promoted++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
|
||||
}
|
||||
} else {
|
||||
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
|
||||
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste);
|
||||
size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption;
|
||||
size_t new_young_evac_reserve = young_evac_reserve;
|
||||
size_t proposed_young_region_expansion = 0;
|
||||
while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
|
||||
committed_from_shared_reserves += region_size_bytes;
|
||||
proposed_young_region_expansion++;
|
||||
new_young_evac_reserve += region_size_bytes;
|
||||
}
|
||||
if (new_young_evac_consumption <= new_young_evac_reserve) {
|
||||
add_region = true;
|
||||
young_evac_reserve = new_young_evac_reserve;
|
||||
young_evac_bytes += live_bytes;
|
||||
consumed_by_young_evac = new_young_evac_consumption;
|
||||
cur_garbage = new_garbage;
|
||||
young_regions_evacuated++;
|
||||
} else {
|
||||
// We failed to sufficiently expand old so unwind proposed expansion
|
||||
committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes;
|
||||
}
|
||||
}
|
||||
if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
|
||||
add_region = true;
|
||||
young_cur_cset = new_cset;
|
||||
cur_young_garbage = new_garbage;
|
||||
}
|
||||
}
|
||||
if (add_region) {
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
if (regions_transferred_to_old > 0) {
|
||||
assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
|
||||
heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
|
||||
heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
|
||||
|
||||
if (committed_from_shared_reserves < shared_reserves) {
|
||||
// Give all the rest to promotion
|
||||
old_promo_reserve += (shared_reserves - committed_from_shared_reserves);
|
||||
// dead code: committed_from_shared_reserves = shared_reserves;
|
||||
}
|
||||
|
||||
// Consider the effects of round-off:
|
||||
// 1. We know that the sum over each evacuation mutiplied by Evacuation Waste is <= total evacuation reserve
|
||||
// 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget
|
||||
// by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1
|
||||
// 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round down
|
||||
// to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1.
|
||||
// 4. This explains the adjustments to subtotals in the assert statements below.
|
||||
assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated,
|
||||
"budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve);
|
||||
assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated,
|
||||
"budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve);
|
||||
assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted,
|
||||
"budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve);
|
||||
assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <=
|
||||
heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() +
|
||||
heap->old_generation()->get_promoted_reserve(), "Exceeded budget");
|
||||
|
||||
if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) {
|
||||
size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve();
|
||||
size_t delta_regions = delta_bytes / region_size_bytes;
|
||||
size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions);
|
||||
log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer);
|
||||
ssize_t negated_regions = -regions_to_transfer;
|
||||
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions);
|
||||
} else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) {
|
||||
size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve;
|
||||
size_t delta_regions = delta_bytes / region_size_bytes;
|
||||
size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions);
|
||||
log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer);
|
||||
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer);
|
||||
}
|
||||
|
||||
heap->young_generation()->set_evacuation_reserve(young_evac_reserve);
|
||||
heap->old_generation()->set_evacuation_reserve(old_evac_reserve);
|
||||
heap->old_generation()->set_promoted_reserve(old_promo_reserve);
|
||||
}
|
||||
|
||||
@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
|
||||
public:
|
||||
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
|
||||
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
private:
|
||||
void choose_global_collection_set(ShenandoahCollectionSet* cset,
|
||||
|
||||
@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
|
||||
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
|
||||
}
|
||||
|
||||
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
assert(collection_set->is_empty(), "Must be empty");
|
||||
@ -153,8 +153,8 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
|
||||
if (immediate_percent <= ShenandoahImmediateThreshold) {
|
||||
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
|
||||
}
|
||||
|
||||
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ShenandoahHeuristics::record_cycle_start() {
|
||||
|
||||
@ -183,9 +183,12 @@ protected:
|
||||
|
||||
static int compare_by_garbage(RegionData a, RegionData b);
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free) = 0;
|
||||
// This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
|
||||
// the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
|
||||
// mode, the return value is 0.
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free) = 0;
|
||||
|
||||
void adjust_penalty(intx step);
|
||||
|
||||
@ -233,7 +236,9 @@ public:
|
||||
|
||||
virtual void record_requested_gc();
|
||||
|
||||
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
// Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
|
||||
// reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
|
||||
virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
|
||||
virtual bool can_unload_classes();
|
||||
|
||||
|
||||
@ -26,9 +26,11 @@
|
||||
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
|
||||
#include "gc/shenandoah/shenandoahFreeSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
|
||||
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
|
||||
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
@ -77,15 +79,17 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
return false;
|
||||
}
|
||||
_mixed_evac_cset = collection_set;
|
||||
_included_old_regions = 0;
|
||||
_evacuated_old_bytes = 0;
|
||||
_collected_old_bytes = 0;
|
||||
|
||||
if (_old_generation->is_preparing_for_mark()) {
|
||||
// We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
|
||||
// This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
|
||||
// this in turn is most likely because there are just one or two candidate regions remaining).
|
||||
log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
|
||||
log_info(gc, ergo)("Remaining " UINT32_FORMAT
|
||||
" old regions are being coalesced and filled", unprocessed_old_collection_candidates());
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -111,150 +115,44 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
|
||||
// of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
|
||||
// of live memory in that region and by the amount of unallocated memory in that region if the evacuation
|
||||
// budget is constrained by availability of free memory.
|
||||
const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
|
||||
const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
|
||||
size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t fragmented_available;
|
||||
size_t excess_fragmented_available;
|
||||
_old_evacuation_reserve = _old_generation->get_evacuation_reserve();
|
||||
_old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
|
||||
|
||||
if (unfragmented_available > old_evacuation_budget) {
|
||||
unfragmented_available = old_evacuation_budget;
|
||||
fragmented_available = 0;
|
||||
excess_fragmented_available = 0;
|
||||
// fragmented_available is the amount of memory within partially consumed old regions that may be required to
|
||||
// hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available
|
||||
// in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
|
||||
// to evacuate into the existing partially consumed old regions.
|
||||
|
||||
// if fragmented_available is non-zero, excess_fragmented_old_budget represents the amount of fragmented memory
// that is available within old, but is not required to hold the results of old evacuation. As old-gen regions
|
||||
// are added into the collection set, their free memory is subtracted from excess_fragmented_old_budget until the
|
||||
// excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is
|
||||
// subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
|
||||
// fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions
|
||||
// selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
|
||||
// to unfragmented regions.
|
||||
|
||||
size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
|
||||
if (unaffiliated_available > _old_evacuation_reserve) {
|
||||
_unspent_unfragmented_old_budget = _old_evacuation_budget;
|
||||
_unspent_fragmented_old_budget = 0;
|
||||
_excess_fragmented_old_budget = 0;
|
||||
} else {
|
||||
assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
|
||||
fragmented_available = _old_generation->available() - unfragmented_available;
|
||||
assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
|
||||
if (fragmented_available + unfragmented_available > old_evacuation_budget) {
|
||||
excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
|
||||
fragmented_available -= excess_fragmented_available;
|
||||
assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
|
||||
size_t affiliated_available = _old_generation->available() - unaffiliated_available;
|
||||
assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
|
||||
if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
|
||||
_excess_fragmented_old_budget = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
|
||||
affiliated_available -= _excess_fragmented_old_budget;
|
||||
}
|
||||
_unspent_fragmented_old_budget = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
|
||||
_unspent_unfragmented_old_budget = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
|
||||
}
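
// Illustrative sketch (not part of this patch): the budget split performed above, pulled out into a
// standalone function with made-up inputs. The field names become local variables here, and the
// waste factor of 1.2 is an assumed example value for ShenandoahOldEvacWaste, not the real default.
#include <cstddef>
#include <cstdio>

struct OldBudgets {
  size_t unspent_unfragmented;
  size_t unspent_fragmented;
  size_t excess_fragmented;
};

static OldBudgets split_old_budgets(size_t reserve, size_t available,
                                    size_t unaffiliated_available, double old_evac_waste) {
  OldBudgets b{};
  size_t budget = (size_t) ((double) reserve / old_evac_waste);
  if (unaffiliated_available > reserve) {
    // All evacuations fit in empty (unaffiliated) regions: no need to target fragmented memory.
    b.unspent_unfragmented = budget;
  } else {
    size_t affiliated_available = available - unaffiliated_available;
    if (affiliated_available + unaffiliated_available > reserve) {
      b.excess_fragmented = (affiliated_available + unaffiliated_available) - reserve;
      affiliated_available -= b.excess_fragmented;
    }
    b.unspent_fragmented   = (size_t) ((double) affiliated_available   / old_evac_waste);
    b.unspent_unfragmented = (size_t) ((double) unaffiliated_available / old_evac_waste);
  }
  return b;
}

int main() {
  // Example: 48 MB old evacuation reserve, 64 MB available in old, 32 MB of it in empty regions.
  OldBudgets b = split_old_budgets(48u << 20, 64u << 20, 32u << 20, 1.2);
  std::printf("unfragmented=%zu fragmented=%zu excess=%zu\n",
              b.unspent_unfragmented, b.unspent_fragmented, b.excess_fragmented);
  return 0;
}
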
|
||||
|
||||
size_t remaining_old_evacuation_budget = old_evacuation_budget;
|
||||
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u",
|
||||
byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
|
||||
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u",
|
||||
PROPERFMTARGS(_old_evacuation_budget),
|
||||
unprocessed_old_collection_candidates());
|
||||
|
||||
size_t lost_evacuation_capacity = 0;
|
||||
|
||||
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
|
||||
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
|
||||
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
|
||||
// evacuate region N, then there is no need to even consider evacuating region N+1.
|
||||
while (unprocessed_old_collection_candidates() > 0) {
|
||||
// Old collection candidates are sorted in order of decreasing garbage contained therein.
|
||||
ShenandoahHeapRegion* r = next_old_collection_candidate();
|
||||
if (r == nullptr) {
|
||||
break;
|
||||
}
|
||||
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
|
||||
|
||||
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
|
||||
// to decrease the capacity of the fragmented memory by the scaled loss.
|
||||
|
||||
const size_t live_data_for_evacuation = r->get_live_data_bytes();
|
||||
size_t lost_available = r->free();
|
||||
|
||||
if ((lost_available > 0) && (excess_fragmented_available > 0)) {
|
||||
if (lost_available < excess_fragmented_available) {
|
||||
excess_fragmented_available -= lost_available;
|
||||
lost_evacuation_capacity -= lost_available;
|
||||
lost_available = 0;
|
||||
} else {
|
||||
lost_available -= excess_fragmented_available;
|
||||
lost_evacuation_capacity -= excess_fragmented_available;
|
||||
excess_fragmented_available = 0;
|
||||
}
|
||||
}
|
||||
size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
|
||||
if ((lost_available > 0) && (fragmented_available > 0)) {
|
||||
if (scaled_loss + live_data_for_evacuation < fragmented_available) {
|
||||
fragmented_available -= scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
// We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
|
||||
// to decrement scaled_loss
|
||||
}
|
||||
}
|
||||
if (scaled_loss > 0) {
|
||||
// We were not able to account for the lost free memory within fragmented memory, so we need to take this
|
||||
// allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
|
||||
if (live_data_for_evacuation > unfragmented_available) {
|
||||
// There is no room to evacuate this region or any that come after it within the candidates array.
|
||||
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
|
||||
unfragmented_available, live_data_for_evacuation, r->index());
|
||||
break;
|
||||
} else {
|
||||
unfragmented_available -= live_data_for_evacuation;
|
||||
}
|
||||
} else {
|
||||
// Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
|
||||
// fragmented or unfragmented available memory. Use up the fragmented memory budget first.
|
||||
size_t evacuation_need = live_data_for_evacuation;
|
||||
|
||||
if (evacuation_need > fragmented_available) {
|
||||
evacuation_need -= fragmented_available;
|
||||
fragmented_available = 0;
|
||||
} else {
|
||||
fragmented_available -= evacuation_need;
|
||||
evacuation_need = 0;
|
||||
}
|
||||
if (evacuation_need > unfragmented_available) {
|
||||
// There is no room to evacuate this region or any that come after it within the candidates array.
|
||||
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
|
||||
unfragmented_available, live_data_for_evacuation, r->index());
|
||||
break;
|
||||
} else {
|
||||
unfragmented_available -= evacuation_need;
|
||||
// dead code: evacuation_need == 0;
|
||||
}
|
||||
}
|
||||
collection_set->add_region(r);
|
||||
included_old_regions++;
|
||||
evacuated_old_bytes += live_data_for_evacuation;
|
||||
collected_old_bytes += r->garbage();
|
||||
consume_old_collection_candidate();
|
||||
}
|
||||
|
||||
if (_first_pinned_candidate != NOT_FOUND) {
|
||||
// Need to deal with pinned regions
|
||||
slide_pinned_regions_to_front();
|
||||
}
|
||||
decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
|
||||
if (included_old_regions > 0) {
|
||||
log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
|
||||
included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
|
||||
}
|
||||
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
// We have added the last of our collection candidates to a mixed collection.
|
||||
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
|
||||
clear_triggers();
|
||||
|
||||
_old_generation->complete_mixed_evacuations();
|
||||
} else if (included_old_regions == 0) {
|
||||
// We have candidates, but none were included for evacuation - are they all pinned?
|
||||
// or did we just not have enough room for any of them in this collection set?
|
||||
// We don't want a region with a stuck pin to prevent subsequent old collections, so
|
||||
// if they are all pinned we transition to a state that will allow us to make these uncollected
|
||||
// (pinned) regions parsable.
|
||||
if (all_candidates_are_pinned()) {
|
||||
log_info(gc, ergo)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
|
||||
_old_generation->abandon_mixed_evacuations();
|
||||
} else {
|
||||
log_info(gc, ergo)("No regions selected for mixed collection. "
|
||||
"Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
|
||||
", Lost capacity: " PROPERFMT
|
||||
", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
|
||||
PROPERFMTARGS(old_evacuation_reserve),
|
||||
PROPERFMTARGS(remaining_old_evacuation_budget),
|
||||
PROPERFMTARGS(lost_evacuation_capacity),
|
||||
_next_old_collection_candidate, _last_old_collection_candidate);
|
||||
}
|
||||
}
|
||||
|
||||
return (included_old_regions > 0);
|
||||
return add_old_regions_to_cset();
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
|
||||
@ -328,6 +226,187 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
|
||||
_next_old_collection_candidate = write_index + 1;
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
return false;
|
||||
}
|
||||
_first_pinned_candidate = NOT_FOUND;
|
||||
|
||||
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
|
||||
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
|
||||
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
|
||||
// evacuate region N, then there is no need to even consider evacuating region N+1.
|
||||
while (unprocessed_old_collection_candidates() > 0) {
|
||||
// Old collection candidates are sorted in order of decreasing garbage contained therein.
|
||||
ShenandoahHeapRegion* r = next_old_collection_candidate();
|
||||
if (r == nullptr) {
|
||||
break;
|
||||
}
|
||||
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
|
||||
|
||||
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
|
||||
// to decrease the capacity of the fragmented memory by the scaled loss.
|
||||
|
||||
const size_t live_data_for_evacuation = r->get_live_data_bytes();
|
||||
size_t lost_available = r->free();
|
||||
|
||||
ssize_t fragmented_delta = 0;
|
||||
ssize_t unfragmented_delta = 0;
|
||||
ssize_t excess_delta = 0;
|
||||
|
||||
// We must decrease our mixed-evacuation budgets in proportion to the lost available memory. This memory that is no
// longer available was likely "promised" to promotions, so we must decrease our mixed evacuations now.
// (e.g. if we lose 14 bytes of available old memory, we must decrease the evacuation budget by 10 bytes.)
|
||||
size_t scaled_loss = (size_t) (((double) lost_available) / ShenandoahOldEvacWaste);
|
||||
if (lost_available > 0) {
|
||||
// We need to subtract lost_available from our working evacuation budgets
|
||||
if (scaled_loss < _excess_fragmented_old_budget) {
|
||||
excess_delta -= scaled_loss;
|
||||
_excess_fragmented_old_budget -= scaled_loss;
|
||||
} else {
|
||||
excess_delta -= _excess_fragmented_old_budget;
|
||||
_excess_fragmented_old_budget = 0;
|
||||
}
|
||||
|
||||
if (scaled_loss < _unspent_fragmented_old_budget) {
|
||||
_unspent_fragmented_old_budget -= scaled_loss;
|
||||
fragmented_delta = -scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
scaled_loss -= _unspent_fragmented_old_budget;
|
||||
fragmented_delta = -_unspent_fragmented_old_budget;
|
||||
_unspent_fragmented_old_budget = 0;
|
||||
}
|
||||
|
||||
if (scaled_loss < _unspent_unfragmented_old_budget) {
|
||||
_unspent_unfragmented_old_budget -= scaled_loss;
|
||||
unfragmented_delta = -scaled_loss;
|
||||
scaled_loss = 0;
|
||||
} else {
|
||||
scaled_loss -= _unspent_unfragmented_old_budget;
|
||||
unfragmented_delta = -_unspent_unfragmented_old_budget;
|
||||
_unspent_unfragmented_old_budget = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate the evacuation copy from unfragmented memory if any exists
|
||||
size_t evacuation_need = live_data_for_evacuation;
|
||||
if (evacuation_need < _unspent_unfragmented_old_budget) {
|
||||
_unspent_unfragmented_old_budget -= evacuation_need;
|
||||
} else {
|
||||
if (_unspent_unfragmented_old_budget > 0) {
|
||||
evacuation_need -= _unspent_unfragmented_old_budget;
|
||||
unfragmented_delta -= _unspent_unfragmented_old_budget;
|
||||
_unspent_unfragmented_old_budget = 0;
|
||||
}
|
||||
// Take the remaining allocation out of fragmented available
|
||||
if (_unspent_fragmented_old_budget > evacuation_need) {
|
||||
_unspent_fragmented_old_budget -= evacuation_need;
|
||||
} else {
|
||||
// We cannot add this region into the collection set. We're done. Undo the adjustments to available.
|
||||
_unspent_fragmented_old_budget -= fragmented_delta;
|
||||
_unspent_unfragmented_old_budget -= unfragmented_delta;
|
||||
_excess_fragmented_old_budget -= excess_delta;
|
||||
break;
|
||||
}
|
||||
}
|
||||
_mixed_evac_cset->add_region(r);
|
||||
_included_old_regions++;
|
||||
_evacuated_old_bytes += live_data_for_evacuation;
|
||||
_collected_old_bytes += r->garbage();
|
||||
consume_old_collection_candidate();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
|
||||
if (_first_pinned_candidate != NOT_FOUND) {
|
||||
// Need to deal with pinned regions
|
||||
slide_pinned_regions_to_front();
|
||||
}
|
||||
decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes);
|
||||
if (_included_old_regions > 0) {
|
||||
log_info(gc)("Old-gen mixed evac (%zu regions, evacuating %zu%s, reclaiming: %zu%s)",
|
||||
_included_old_regions,
|
||||
byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes),
|
||||
byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes));
|
||||
}
|
||||
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
// We have added the last of our collection candidates to a mixed collection.
|
||||
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
|
||||
clear_triggers();
|
||||
_old_generation->complete_mixed_evacuations();
|
||||
} else if (_included_old_regions == 0) {
|
||||
// We have candidates, but none were included for evacuation - are they all pinned?
|
||||
// or did we just not have enough room for any of them in this collection set?
|
||||
// We don't want a region with a stuck pin to prevent subsequent old collections, so
|
||||
// if they are all pinned we transition to a state that will allow us to make these uncollected
|
||||
// (pinned) regions parsable.
|
||||
if (all_candidates_are_pinned()) {
|
||||
log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
|
||||
_old_generation->abandon_mixed_evacuations();
|
||||
} else {
|
||||
log_info(gc)("No regions selected for mixed collection. "
|
||||
"Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
|
||||
PROPERFMTARGS(_old_evacuation_reserve),
|
||||
_next_old_collection_candidate, _last_old_collection_candidate);
|
||||
}
|
||||
}
|
||||
return (_included_old_regions > 0);
|
||||
}
|
||||
|
||||
bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) {
|
||||
if (unprocessed_old_collection_candidates() == 0) {
|
||||
add_regions_to_old = 0;
|
||||
return false;
|
||||
} else {
|
||||
ShenandoahYoungGeneration* young_generation = _heap->young_generation();
|
||||
size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
|
||||
size_t max_young_cset = young_generation->get_evacuation_reserve();
|
||||
|
||||
// We have budgeted to assure the live_bytes_in_tenurable_regions() get evacuated into old generation. The young
// reserve covers only untenurable region evacuations.
|
||||
size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions();
|
||||
size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
|
||||
|
||||
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
|
||||
|
||||
assert(consumed_from_young_cset <= max_young_cset, "sanity");
|
||||
assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity");
|
||||
|
||||
size_t regions_for_old_expansion;
|
||||
if (consumed_from_young_cset < max_young_cset) {
|
||||
size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
|
||||
// We can only transfer empty regions from young to old. Furthermore, we must be careful to assure that the young
|
||||
// Collector reserve that remains after transfer is comprised entirely of empty (unaffiliated) regions.
|
||||
size_t consumed_unaffiliated_regions = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
|
||||
size_t available_unaffiliated_regions = ((young_unaffiliated_regions > consumed_unaffiliated_regions)?
|
||||
young_unaffiliated_regions - consumed_unaffiliated_regions: 0);
|
||||
regions_for_old_expansion = MIN2(available_unaffiliated_regions, excess_young_reserves / region_size_bytes);
|
||||
} else {
|
||||
regions_for_old_expansion = 0;
|
||||
}
|
||||
if (regions_for_old_expansion > 0) {
|
||||
log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by %zu regions",
|
||||
regions_for_old_expansion);
|
||||
add_regions_to_old = regions_for_old_expansion;
|
||||
size_t budget_supplement = region_size_bytes * regions_for_old_expansion;
|
||||
size_t supplement_without_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste);
|
||||
_old_evacuation_budget += supplement_without_waste;
|
||||
_unspent_unfragmented_old_budget += supplement_without_waste;
|
||||
_old_generation->augment_evacuation_reserve(budget_supplement);
|
||||
young_generation->set_evacuation_reserve(max_young_cset - budget_supplement);
|
||||
|
||||
return add_old_regions_to_cset();
|
||||
} else {
|
||||
add_regions_to_old = 0;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
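
// Illustrative sketch (not part of this patch): the arithmetic used above to decide how many empty
// young regions can be repurposed for old evacuation. The inputs in main() are made-up example
// values, and a region size of 4 MB is assumed for the example only.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t regions_for_old_expansion(size_t max_young_cset,
                                        size_t consumed_from_young_cset,
                                        size_t young_unaffiliated_regions,
                                        size_t region_size_bytes) {
  if (consumed_from_young_cset >= max_young_cset) {
    return 0;                                     // young reserve is fully spoken for
  }
  size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
  // Only whole empty (unaffiliated) regions can be handed to old.
  size_t consumed_unaffiliated = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
  size_t available_unaffiliated = (young_unaffiliated_regions > consumed_unaffiliated)
                                  ? young_unaffiliated_regions - consumed_unaffiliated : 0;
  return std::min(available_unaffiliated, excess_young_reserves / region_size_bytes);
}

int main() {
  const size_t region = 4u << 20;                  // assumed 4 MB regions
  // 10 empty young regions, 40 MB young reserve, of which 14 MB is actually needed by young.
  size_t n = regions_for_old_expansion(40u << 20, 14u << 20, 10, region);
  std::printf("regions handed to old: %zu\n", n);  // 26 MB excess -> 6 regions, 6 empty regions free
  return 0;
}
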
|
||||
|
||||
void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
@ -336,7 +415,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
size_t immediate_garbage = 0;
|
||||
size_t immediate_regions = 0;
|
||||
size_t live_data = 0;
|
||||
|
||||
RegionData* candidates = _region_data;
|
||||
for (size_t i = 0; i < num_regions; i++) {
|
||||
ShenandoahHeapRegion* region = heap->get_region(i);
|
||||
@ -355,10 +433,10 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
// else, regions that were promoted in place had 0 old live data at mark start
|
||||
|
||||
if (region->is_regular() || region->is_regular_pinned()) {
|
||||
// Only place regular or pinned regions with live data into the candidate set.
|
||||
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
|
||||
// for the collection set here. That happens later during the next young GC cycle,
|
||||
// by which time, the pinned region may no longer be pinned.
|
||||
// Only place regular or pinned regions with live data into the candidate set.
|
||||
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
|
||||
// for the collection set here. That happens later during the next young GC cycle,
|
||||
// by which time, the pinned region may no longer be pinned.
|
||||
if (!region->has_live()) {
|
||||
assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
|
||||
region->make_trash_immediate();
|
||||
@ -561,6 +639,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
|
||||
void ShenandoahOldHeuristics::abandon_collection_candidates() {
|
||||
_last_old_collection_candidate = 0;
|
||||
_next_old_collection_candidate = 0;
|
||||
_live_bytes_in_unprocessed_candidates = 0;
|
||||
_last_old_region = 0;
|
||||
}
|
||||
|
||||
@ -805,8 +884,9 @@ bool ShenandoahOldHeuristics::is_experimental() {
|
||||
return true;
|
||||
}
|
||||
|
||||
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
ShenandoahHeuristics::RegionData* data,
|
||||
size_t data_size, size_t free) {
|
||||
size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
ShenandoahHeuristics::RegionData* data,
|
||||
size_t data_size, size_t free) {
|
||||
ShouldNotReachHere();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -102,6 +102,30 @@ private:
|
||||
size_t _fragmentation_first_old_region;
|
||||
size_t _fragmentation_last_old_region;
|
||||
|
||||
// State variables involved in construction of a mixed-evacuation collection set. These variables are initialized
|
||||
// when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code
|
||||
// calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
|
||||
// finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
|
||||
ShenandoahCollectionSet* _mixed_evac_cset;
|
||||
size_t _evacuated_old_bytes;
|
||||
size_t _collected_old_bytes;
|
||||
size_t _included_old_regions;
|
||||
size_t _old_evacuation_reserve;
|
||||
size_t _old_evacuation_budget;
|
||||
|
||||
// This represents the amount of memory that can be evacuated from old into initially empty regions during a mixed evacuation.
|
||||
// This is the total amount of unfragmented free memory in old divided by ShenandoahOldEvacWaste.
|
||||
size_t _unspent_unfragmented_old_budget;
|
||||
|
||||
// This represents the amount of memory that can be evacuated from old into initially non-empty regions during a mixed
|
||||
// evacuation. This is the total amount of initially fragmented free memory in old divided by ShenandoahOldEvacWaste.
|
||||
size_t _unspent_fragmented_old_budget;
|
||||
|
||||
// If there is more available memory in old than is required by the intended mixed evacuation, the amount of excess
|
||||
// memory is represented by _excess_fragmented_old_budget. To convert this value into a promotion budget, multiply by
|
||||
// ShenandoahOldEvacWaste and divide by ShenandoahPromoWaste.
|
||||
size_t _excess_fragmented_old_budget;
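
// Illustrative sketch (not part of this patch): the conversion described in the comment above,
// written out as a standalone helper. The waste factors are passed in explicitly here; the example
// values 1.2 and 1.1 are assumptions, not the real defaults of the two command-line options.
#include <cstddef>
#include <cstdio>

static size_t excess_as_promotion_budget(size_t excess_fragmented_old,
                                         double old_evac_waste, double promo_waste) {
  return (size_t) (((double) excess_fragmented_old * old_evac_waste) / promo_waste);
}

int main() {
  std::printf("%zu\n", excess_as_promotion_budget(8u << 20, 1.2, 1.1));  // roughly 8.7 MB
  return 0;
}
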
|
||||
|
||||
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
|
||||
// be present within an old-generation region before that region is considered a good candidate for inclusion in
|
||||
// the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
|
||||
@ -131,7 +155,15 @@ private:
|
||||
void set_trigger_if_old_is_overgrown();
|
||||
|
||||
protected:
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
|
||||
size_t
|
||||
choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
|
||||
|
||||
// This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
|
||||
// to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with
|
||||
// a conservative old evacuation budget, and the second time with a larger more aggressive old evacuation budget. Returns
|
||||
// true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize
|
||||
// mixed evacuations.)
|
||||
bool add_old_regions_to_cset();
|
||||
|
||||
public:
|
||||
explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
|
||||
@ -139,8 +171,22 @@ public:
|
||||
// Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
|
||||
void prepare_for_old_collections();
|
||||
|
||||
// Return true iff the collection set is primed with at least one old-gen region.
|
||||
bool prime_collection_set(ShenandoahCollectionSet* set);
|
||||
// Initialize instance variables to support the preparation of a mixed-evacuation collection set. Adds as many
|
||||
// old candidate regions into the collection set as can fit within the initial conservative old evacuation budget.
|
||||
// Returns true iff we need to finalize mixed evacs.
|
||||
bool prime_collection_set(ShenandoahCollectionSet* collection_set);
|
||||
|
||||
// If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
|
||||
// evacuation candidate regions into the collection set as will fit within this excess repurposed reserve.
// Returns true iff we need to finalize mixed evacs. Upon return, the reference parameter add_regions_to_old holds the
// number of regions to transfer from young to old.
|
||||
bool top_off_collection_set(size_t &add_regions_to_old);
|
||||
|
||||
// Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
|
||||
// of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
|
||||
// be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and
|
||||
// returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
|
||||
bool finalize_mixed_evacs();
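
// Illustrative sketch (not part of this patch): the intended calling sequence for the three entry
// points above, mirroring what ShenandoahYoungHeuristics::choose_collection_set_from_regiondata does
// later in this change. The stand-in type and its trivial bodies are made up for illustration.
#include <cstddef>
#include <cstdio>

struct StandInOldHeuristics {
  bool prime_collection_set(void* cset)        { (void) cset; return true; }  // conservative first pass
  bool top_off_collection_set(size_t& to_old)  { to_old = 2; return true; }   // optional second pass
  bool finalize_mixed_evacs()                  { return true; }               // pinned-region/log bookkeeping
};

// Returns the number of regions the caller should transfer from young to old.
static size_t build_mixed_cset(StandInOldHeuristics& old_h, void* cset) {
  bool need_finalize = old_h.prime_collection_set(cset);
  // ... young regions would be chosen and added to the collection set here ...
  size_t regions_to_old = 0;
  need_finalize |= old_h.top_off_collection_set(regions_to_old);
  if (need_finalize) {
    old_h.finalize_mixed_evacs();
  }
  return regions_to_old;
}

int main() {
  StandInOldHeuristics h;
  std::printf("regions to transfer: %zu\n", build_mixed_cset(h, nullptr));
  return 0;
}
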
|
||||
|
||||
// How many old-collection candidates have not yet been processed?
|
||||
uint unprocessed_old_collection_candidates() const;
|
||||
|
||||
@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
|
||||
return ShenandoahDegeneratedGC;
|
||||
}
|
||||
|
||||
void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
|
||||
|
||||
// Do not select too large CSet that would overflow the available free space.
|
||||
@ -76,4 +76,5 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -46,9 +46,9 @@ public:
|
||||
|
||||
virtual bool should_degenerate_cycle();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
|
||||
RegionData* data, size_t data_size,
|
||||
size_t free);
|
||||
|
||||
virtual const char* name() { return "Passive"; }
|
||||
virtual bool is_diagnostic() { return true; }
|
||||
|
||||
@ -59,9 +59,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
|
||||
return ShenandoahHeuristics::should_start_gc();
|
||||
}
|
||||
|
||||
void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free) {
|
||||
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
|
||||
|
||||
for (size_t idx = 0; idx < size; idx++) {
|
||||
@ -70,4 +70,5 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
|
||||
cset->add_region(r);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -40,9 +40,9 @@ public:
|
||||
|
||||
virtual bool should_start_gc();
|
||||
|
||||
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t free);
|
||||
|
||||
virtual const char* name() { return "Static"; }
|
||||
virtual bool is_diagnostic() { return false; }
|
||||
|
||||
@ -33,11 +33,11 @@
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
|
||||
: ShenandoahGenerationalHeuristics(generation) {
|
||||
: ShenandoahGenerationalHeuristics(generation) {
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) {
|
||||
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
|
||||
@ -48,6 +48,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
|
||||
// array before younger regions that typically contain more garbage. This is one reason why,
|
||||
// for example, we continue examining regions even after rejecting a region that has
|
||||
// more live data than we can evacuate.
|
||||
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
|
||||
bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
|
||||
|
||||
// Better select garbage-first regions
|
||||
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
|
||||
@ -55,6 +57,17 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
|
||||
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
|
||||
|
||||
choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);
|
||||
|
||||
// Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be
|
||||
// enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
|
||||
// young-gen reserve available following selection of the young-gen collection set, see if we can use
|
||||
// this memory to expand the old-gen evacuation collection set.
|
||||
size_t add_regions_to_old;
|
||||
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
|
||||
if (need_to_finalize_mixed) {
|
||||
heap->old_generation()->heuristics()->finalize_mixed_evacs();
|
||||
}
|
||||
return add_regions_to_old;
|
||||
}
|
||||
|
||||
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,
|
||||
|
||||
@ -38,9 +38,9 @@ public:
|
||||
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
|
||||
|
||||
|
||||
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
|
||||
RegionData* data, size_t size,
|
||||
size_t actual_free) override;
|
||||
|
||||
bool should_start_gc() override;
|
||||
|
||||
|
||||
@ -50,6 +50,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
|
||||
_region_count(0),
|
||||
_old_garbage(0),
|
||||
_preselected_regions(nullptr),
|
||||
_young_available_bytes_collected(0),
|
||||
_old_available_bytes_collected(0),
|
||||
_current_index(0) {
|
||||
|
||||
// The collection set map is reserved to cover the entire heap *and* zero addresses.
|
||||
@ -104,6 +106,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
|
||||
}
|
||||
} else if (r->is_old()) {
|
||||
_old_bytes_to_evacuate += live;
|
||||
_old_available_bytes_collected += free;
|
||||
_old_garbage += garbage;
|
||||
}
|
||||
|
||||
@ -140,6 +143,7 @@ void ShenandoahCollectionSet::clear() {
|
||||
_old_bytes_to_evacuate = 0;
|
||||
|
||||
_young_available_bytes_collected = 0;
|
||||
_old_available_bytes_collected = 0;
|
||||
|
||||
_has_old_regions = false;
|
||||
}
|
||||
|
||||
@ -75,6 +75,10 @@ private:
|
||||
// should be subtracted from what's available.
|
||||
size_t _young_available_bytes_collected;
|
||||
|
||||
// When a region having memory available to be allocated is added to the collection set, the region's available memory
|
||||
// should be subtracted from what's available.
|
||||
size_t _old_available_bytes_collected;
|
||||
|
||||
shenandoah_padding(0);
|
||||
volatile size_t _current_index;
|
||||
shenandoah_padding(1);
|
||||
@ -121,6 +125,9 @@ public:
|
||||
// Returns the amount of free bytes in young regions in the collection set.
|
||||
size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
|
||||
|
||||
// Returns the amount of free bytes in old regions in the collection set.
|
||||
size_t get_old_available_bytes_collected() const { return _old_available_bytes_collected; }
|
||||
|
||||
// Returns the amount of garbage in old regions in the collection set.
|
||||
inline size_t get_old_garbage() const;
|
||||
|
||||
|
||||
@ -204,9 +204,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
return false;
|
||||
}
|
||||
|
||||
entry_concurrent_update_refs_prepare(heap);
|
||||
|
||||
// Perform update-refs phase.
|
||||
entry_concurrent_update_refs_prepare(heap);
|
||||
if (ShenandoahVerify) {
|
||||
vmop_entry_init_update_refs();
|
||||
}
|
||||
@ -227,6 +226,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
// Update references freed up collection set, kick the cleanup to reclaim the space.
|
||||
entry_cleanup_complete();
|
||||
} else {
|
||||
_abbreviated = true;
|
||||
if (!entry_final_roots()) {
|
||||
assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
|
||||
return false;
|
||||
@ -235,7 +235,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
|
||||
if (VerifyAfterGC) {
|
||||
vmop_entry_verify_final_roots();
|
||||
}
|
||||
_abbreviated = true;
|
||||
}
|
||||
|
||||
// We defer generation resizing actions until after cset regions have been recycled. We do this even following an
|
||||
@ -282,7 +281,6 @@ bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
|
||||
ShenandoahHeap* const heap = ShenandoahHeap::heap();
|
||||
TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
|
||||
@ -536,6 +534,12 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
|
||||
// This phase does not use workers, no need for setup
|
||||
heap->try_inject_alloc_failure();
|
||||
op_cleanup_early();
|
||||
if (!heap->is_evacuation_in_progress()) {
|
||||
// This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
|
||||
// the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
|
||||
// during promote-in-place processing.
|
||||
heap->rebuild_free_set(true /*concurrent*/);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahConcurrentGC::entry_evacuate() {
|
||||
|
||||
@ -326,7 +326,7 @@ void ShenandoahRegionPartitions::initialize_old_collector() {
|
||||
}
|
||||
|
||||
void ShenandoahRegionPartitions::make_all_regions_unavailable() {
|
||||
shenandoah_assert_heaplocked();
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
|
||||
_membership[partition_id].clear_all();
|
||||
_leftmosts[partition_id] = _max;
|
||||
@ -439,6 +439,13 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh
|
||||
_available[int(which_partition)] = value - _used[int(which_partition)];
|
||||
}
|
||||
|
||||
void ShenandoahRegionPartitions::set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
|
||||
shenandoah_assert_heaplocked();
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
_used[int(which_partition)] = value;
|
||||
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
|
||||
shenandoah_assert_heaplocked();
|
||||
@ -900,7 +907,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
void ShenandoahRegionPartitions::assert_bounds() {
|
||||
|
||||
size_t capacities[UIntNumPartitions];
|
||||
size_t used[UIntNumPartitions];
|
||||
@ -936,7 +943,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
switch (partition) {
|
||||
case ShenandoahFreeSetPartitionId::NotFree:
|
||||
{
|
||||
assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty");
|
||||
assert(capacity != _region_size_bytes, "Should not be retired if empty");
|
||||
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
|
||||
if (r->is_humongous()) {
|
||||
if (r->is_old()) {
|
||||
@ -976,12 +983,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
case ShenandoahFreeSetPartitionId::Collector:
|
||||
case ShenandoahFreeSetPartitionId::OldCollector:
|
||||
{
|
||||
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
|
||||
assert(capacity > 0, "free regions must have allocation capacity");
|
||||
bool is_empty = (capacity == _region_size_bytes);
|
||||
regions[int(partition)]++;
|
||||
used[int(partition)] += _region_size_bytes - capacity;
|
||||
capacities[int(partition)] += _region_size_bytes;
|
||||
|
||||
if (i < leftmosts[int(partition)]) {
|
||||
leftmosts[int(partition)] = i;
|
||||
}
|
||||
@ -1020,20 +1027,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free regions before the leftmost: %zd, bound %zd",
|
||||
"Mutator free region before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free regions past the rightmost: %zd, bound %zd",
|
||||
"Mutator free region past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
|
||||
"Mutator free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"free empty region (%zd) before the leftmost bound %zd",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"free empty region (%zd) past the rightmost bound %zd",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
|
||||
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
|
||||
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: %zd < %zd",
|
||||
@ -1053,20 +1060,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
|
||||
"Collector free regions before the leftmost: %zd, bound %zd",
|
||||
"Collector free region before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
|
||||
"Collector free regions past the rightmost: %zd, bound %zd",
|
||||
"Collector free region past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
|
||||
"Collector free empty region before the leftmost: %zd, bound %zd",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
|
||||
"Collector free empty region past the rightmost: %zd, bound %zd",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
|
||||
|
||||
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
|
||||
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd",
|
||||
@ -1083,106 +1090,109 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
|
||||
ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
|
||||
// Concurrent recycling of trash recycles a region (changing its state from is_trash to is_empty without the heap lock),
|
||||
|
||||
// If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero.
|
||||
// Likewise for empty region partitions.
|
||||
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector free regions before the leftmost: %zd, bound %zd",
|
||||
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
|
||||
"OldCollector free regions past the rightmost: %zd, bound %zd",
|
||||
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
|
||||
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
|
||||
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
"OldCollector free empty regions before the leftmost: %zd, bound %zd",
|
||||
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
"free empty region (%zd) before the leftmost bound %zd, region %s trash",
|
||||
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
((beg_off >= _max)? "out of bounds is not":
|
||||
(ShenandoahHeap::heap()->get_region(_leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
|
||||
"is": "is not")));
|
||||
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
"OldCollector free empty regions past the rightmost: %zd, bound %zd",
|
||||
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
|
||||
"free empty region (%zd) past the rightmost bound %zd, region %s trash",
|
||||
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
((end_off < 0)? "out of bounds is not" :
|
||||
(ShenandoahHeap::heap()->get_region(_rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
|
||||
"is": "is not")));
|
||||
|
||||
if (validate_totals) {
|
||||
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
|
||||
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
|
||||
// to the Mutator partition to top it off so that it matches the running totals.
|
||||
//
|
||||
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
|
||||
// should also match running totals.
|
||||
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
|
||||
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
|
||||
// to the Mutator partition to top it off so that it matches the running totals.
|
||||
//
|
||||
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
|
||||
// should also match running totals.
|
||||
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
|
||||
assert(young_retired_capacity == young_retired_used, "sanity");
|
||||
|
||||
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
|
||||
assert(young_retired_capacity == young_retired_used, "sanity");
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match (%zu != %zu)",
|
||||
capacities[int(ShenandoahFreeSetPartitionId::OldCollector)],
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]);
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
|
||||
"Old Collector available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Capacity total must be >= counted tally");
|
||||
size_t mutator_capacity_shortfall =
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
|
||||
young_retired_capacity -= mutator_capacity_shortfall;
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
|
||||
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
|
||||
"Old Collector available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
|
||||
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
|
||||
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Used total must be >= counted tally");
|
||||
size_t mutator_used_shortfall =
|
||||
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_used_shortfall <= young_retired_used, "sanity");
|
||||
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
|
||||
young_retired_used -= mutator_used_shortfall;
|
||||
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Capacity total must be >= counted tally");
|
||||
size_t mutator_capacity_shortfall =
|
||||
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
|
||||
young_retired_capacity -= mutator_capacity_shortfall;
|
||||
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
|
||||
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
|
||||
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
|
||||
young_retired_regions -= mutator_regions_shortfall;
|
||||
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector Capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
|
||||
"Collector Available must equal capacity minus used");
|
||||
|
||||
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Used total must be >= counted tally");
|
||||
size_t mutator_used_shortfall =
|
||||
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
|
||||
assert(mutator_used_shortfall <= young_retired_used, "sanity");
|
||||
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
|
||||
young_retired_used -= mutator_used_shortfall;
|
||||
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
|
||||
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
|
||||
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
|
||||
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
|
||||
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
|
||||
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
|
||||
young_retired_regions -= mutator_regions_shortfall;
|
||||
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
|
||||
"Collector Capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
|
||||
"Collector Available must equal capacity minus used");
|
||||
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
|
||||
"Mutator available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
|
||||
"Mutator humongous waste must match");
|
||||
}
|
||||
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacities must match");
|
||||
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator used must match");
|
||||
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
|
||||
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
|
||||
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
|
||||
"Mutator capacity must be >= used");
|
||||
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
|
||||
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
|
||||
"Mutator available must equal capacity minus used");
|
||||
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
|
||||
"Mutator humongous waste must match");
|
||||
}
|
||||
#endif
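// Illustration (not part of this change): the assertion block above reduces to two bookkeeping
// identities per partition: available equals capacity minus used, and the region count equals
// capacity divided by the region size. A minimal sketch of those identities, using a hypothetical
// struct rather than the real partition arrays:
struct PartitionTotalsSketch {                    // hypothetical type, for illustration only
  size_t capacity;                                // bytes assigned to the partition
  size_t used;                                    // bytes consumed within the partition
  size_t region_size_bytes;                       // ShenandoahHeapRegion::region_size_bytes()
  size_t available() const { return capacity - used; }               // mirrors _available[p]
  size_t regions()   const { return capacity / region_size_bytes; }  // mirrors the counted region tally
};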

@@ -1206,6 +1216,36 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
clear_internal();
}

void ShenandoahFreeSet::move_unaffiliated_regions_from_collector_to_old_collector(ssize_t count) {
shenandoah_assert_heaplocked();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

size_t old_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector);
size_t collector_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector);
if (count > 0) {
size_t ucount = count;
size_t bytes_moved = ucount * region_size_bytes;
assert(collector_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
} else if (count < 0) {
size_t ucount = -count;
size_t bytes_moved = ucount * region_size_bytes;
assert(old_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
}
// else, do nothing
}
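// Illustration (not part of this change): a hedged worked example of the bookkeeping done by
// move_unaffiliated_regions_from_collector_to_old_collector() above, with invented numbers.
constexpr size_t MB = 1024 * 1024;                       // illustrative values only
constexpr size_t region_size_bytes = 2 * MB;
constexpr ssize_t count = 3;                             // positive: Collector -> OldCollector
constexpr size_t bytes_moved = size_t(count) * region_size_bytes;
static_assert(bytes_moved == 6 * MB, "three empty regions carry 6 MB of capacity with them");
// Collector:    empty region count -= 3;  capacity -= bytes_moved;
// OldCollector: empty region count += 3;  capacity += bytes_moved;
// A negative count performs the symmetric transfer from OldCollector back to Collector.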

// was pip_pad_bytes
void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
shenandoah_assert_heaplocked();
@@ -1261,7 +1301,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}

template<typename Iter>
@@ -1496,9 +1536,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
return nullptr;
}
HeapWord* result = nullptr;
// We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this
// moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If
// we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the
// region's affiliation with FREE after we set the region's affiliation to req.affiliation() below.
r->try_recycle_under_lock();
in_new_region = r->is_empty();

if (in_new_region) {
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), req.type_string(), p2i(&req));
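// Illustration (not part of this change): the comment above describes a check-then-act race
// between an allocating thread and a concurrent recycler. The interleaving below only restates
// that ordering; set_affiliation() and the FREE state are taken from the comment, not this diff.
//   allocator: observes !r->is_trash()              // recycle is already in flight
//   allocator: r->set_affiliation(req.affiliation());
//   recycler:  r->set_affiliation(FREE);             // clobbers the allocator's update
// Calling r->try_recycle_under_lock() first either completes or waits out the in-flight
// recycle, so the affiliation written afterwards cannot be overwritten.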
@@ -1668,7 +1711,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
default:
assert(false, "won't happen");
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return result;
}

@@ -1799,6 +1842,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
increase_bytes_allocated(waste_bytes);
}
}

_partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used);
increase_bytes_allocated(total_used);
req.set_actual_size(words_size);
@@ -1819,14 +1863,16 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return _heap->get_region(beg)->bottom();
}

class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
public:
void heap_region_do(ShenandoahHeapRegion* r) {
r->try_recycle();
if (r->is_trash()) {
r->try_recycle();
}
}

bool is_thread_safe() {
@@ -1861,7 +1907,7 @@ bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return true;
} else {
return false;
@@ -1914,7 +1960,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// 4. Do not adjust capacities for generations, we just swapped the regions that have already
// been accounted for. However, we should adjust the evacuation reserves as those may have changed.
shenandoah_assert_heaplocked();
@@ -1945,7 +1991,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
// to recycle trash before attempting to allocate anything in the region.
}
@@ -2025,16 +2071,23 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
for (size_t idx = 0; idx < num_regions; idx++) {
ShenandoahHeapRegion* region = _heap->get_region(idx);
if (region->is_trash()) {
// Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
// partition but have not yet been "cleaned up" following update refs.
// Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage)
// but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs.
if (region->is_old()) {
// We're going to place this region into the Mutator set. We increment old_trashed_regions because this count represents
// regions that the old generation is entitled to without any transfer from young. We do not place this region into
// the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region
// into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the
// OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this
// time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions().
old_trashed_regions++;
} else {
assert(region->is_young(), "Trashed region should be old or young");
young_trashed_regions++;
}
} else if (region->is_old()) {
// count both humongous and regular regions, but don't count trash (cset) regions.
// We count humongous and regular regions as "old regions". We do not count trashed regions that are old. Those
// are counted (above) as old_trashed_regions.
old_region_count++;
if (first_old_region > idx) {
first_old_region = idx;
@@ -2048,7 +2101,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
size_t ac = alloc_capacity(region);
if (ac >= PLAB::min_size() * HeapWordSize) {
if (region->is_trash() || !region->is_old()) {
// Both young and old collected regions (trashed) are placed into the Mutator set
// Both young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set
_partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
if (idx < mutator_leftmost) {
mutator_leftmost = idx;
@@ -2111,10 +2164,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
size_t humongous_waste_bytes = 0;
if (region->is_humongous_start()) {
oop obj = cast_to_oop(region->bottom());
size_t byte_size = obj->size() * HeapWordSize;
size_t region_span = ShenandoahHeapRegion::required_regions(byte_size);
humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size;
// Since rebuild does not necessarily happen at a safepoint, a newly allocated humongous object may not have been
// fully initialized. Therefore, we cannot safely consult its header.
ShenandoahHeapRegion* last_of_humongous_continuation = region;
size_t next_idx;
for (next_idx = idx + 1; next_idx < num_regions; next_idx++) {
ShenandoahHeapRegion* humongous_cont_candidate = _heap->get_region(next_idx);
if (!humongous_cont_candidate->is_humongous_continuation()) {
break;
}
last_of_humongous_continuation = humongous_cont_candidate;
}
// For humongous regions, used() is established while holding the global heap lock so it is reliable here
humongous_waste_bytes = ShenandoahHeapRegion::region_size_bytes() - last_of_humongous_continuation->used();
}
if (region->is_old()) {
old_collector_used += region_size_bytes;
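// Illustration (not part of this change): a hedged worked example of the waste computation
// above, with invented sizes. Walking forward to the last humongous continuation region avoids
// reading the possibly uninitialized object header.
// region_size_bytes = 2 MB, humongous object = 9 MB       (illustrative values only)
// regions spanned   = ceil(9 / 2) = 5
// used() of the last continuation region = 9 MB - 4 * 2 MB = 1 MB
// humongous_waste_bytes = region_size_bytes - used() = 2 MB - 1 MB = 1 MB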
@@ -2183,7 +2245,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
#ifdef ASSERT
if (_heap->mode()->is_generational()) {
assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity");
@@ -2221,7 +2283,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// global_used is unaffected by this transfer

// No need to adjust ranges because humongous regions are not allocatable
@@ -2303,7 +2365,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}

// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
@@ -2370,7 +2432,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}

@@ -2445,7 +2507,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}

@@ -2507,14 +2569,13 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t
first_old_region, last_old_region, old_region_count);
}

void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count,
bool have_evacuation_reserves) {

void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) {
shenandoah_assert_heaplocked();
size_t young_reserve(0), old_reserve(0);

if (_heap->mode()->is_generational()) {
compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves,
young_reserve, old_reserve);
compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve);
} else {
young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
old_reserve = 0;
@@ -2531,8 +2592,41 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_

// Release the rebuild lock now. What remains in this function is read-only
rebuild_lock()->unlock();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
log_status();
if (_heap->mode()->is_generational()) {
// Clear the region balance until it is adjusted in preparation for a subsequent GC cycle.
_heap->old_generation()->set_region_balance(0);
}
}


// Reduce old reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) {
ShenandoahOldGeneration* const old_generation = _heap->old_generation();
size_t requested_promoted_reserve = old_generation->get_promoted_reserve();
size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve();
assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction");
assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity");
size_t delta = requested_old_reserve - adjusted_old_reserve;

if (requested_promoted_reserve >= delta) {
requested_promoted_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
} else {
delta -= requested_promoted_reserve;
requested_promoted_reserve = 0;
requested_old_evac_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
old_generation->set_evacuation_reserve(requested_old_evac_reserve);
}
}
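// Illustration (not part of this change): a hedged worked example of reduce_old_reserve() with
// invented numbers; the reduction is charged to the promotion reserve first and only the
// remainder comes out of the old evacuation reserve.
constexpr size_t MB = 1024 * 1024;                               // illustrative values only
constexpr size_t requested_old_reserve = 100 * MB;
constexpr size_t adjusted_old_reserve  =  70 * MB;
constexpr size_t promoted_reserve      =  20 * MB;               // reduced first
constexpr size_t old_evac_reserve      =  80 * MB;               // pays the remainder
constexpr size_t delta = requested_old_reserve - adjusted_old_reserve;                         // 30 MB
constexpr size_t new_promoted = (promoted_reserve >= delta) ? promoted_reserve - delta : 0;    //  0 MB
constexpr size_t new_old_evac = (promoted_reserve >= delta)
                              ? old_evac_reserve
                              : old_evac_reserve - (delta - promoted_reserve);                 // 70 MB
static_assert(new_promoted + new_old_evac == adjusted_old_reserve, "reserves sum to the adjusted budget");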

// Reduce young reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) {
ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction");
young_generation->set_evacuation_reserve(adjusted_young_reserve);
}

/**
@@ -2549,7 +2643,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
 * this value should be computed by ShenandoahGenerationalHeap::compute_old_generation_balance().
 */
void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions,
bool have_evacuation_reserves,
size_t& young_reserve_result, size_t& old_reserve_result) const {
shenandoah_assert_generational();
shenandoah_assert_heaplocked();
@@ -2566,6 +2659,15 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
old_available += old_trashed_regions * region_size_bytes;
young_unaffiliated_regions += young_trashed_regions;

assert(young_capacity >= young_generation->used(),
"Young capacity (%zu) must exceed used (%zu)", young_capacity, young_generation->used());

size_t young_available = young_capacity - young_generation->used();
young_available += young_trashed_regions * region_size_bytes;

assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity");
assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity");

// Consult old-region balance to make adjustments to current generation capacities and availability.
// The generation region transfers take place after we rebuild. old_region_balance represents number of regions
// to transfer from old to young.
@@ -2585,6 +2687,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
old_available -= xfer_bytes;
old_unaffiliated_regions -= old_region_balance;
young_available += xfer_bytes;
young_capacity += xfer_bytes;
young_unaffiliated_regions += old_region_balance;
}
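// Illustration (not part of this change): a hedged worked example of the balance adjustment
// above, with invented values. A positive old_region_balance of 3 with 2 MB regions moves three
// unaffiliated regions, and therefore 6 MB of capacity and availability, from old to young.
// old_region_balance = 3, region_size_bytes = 2 MB         (illustrative values only)
// xfer_bytes = 3 * 2 MB = 6 MB
// old:   old_available  -= 6 MB;   old_unaffiliated_regions  -= 3;
// young: young_available += 6 MB;  young_capacity += 6 MB;   young_unaffiliated_regions += 3;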
@@ -2593,41 +2696,22 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
// promotions and evacuations. The partition between which old memory is reserved for evacuation and
// which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
// each PLAB's available memory.
if (have_evacuation_reserves) {
// We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
if (old_reserve_result > old_available) {
// Try to transfer memory from young to old.
size_t old_deficit = old_reserve_result - old_available;
size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
if (young_unaffiliated_regions < old_region_deficit) {
old_region_deficit = young_unaffiliated_regions;
}
young_unaffiliated_regions -= old_region_deficit;
old_unaffiliated_regions += old_region_deficit;
old_region_balance -= old_region_deficit;
old_generation->set_region_balance(old_region_balance);
}
} else {
// We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
// The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
// Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
// unaffiliated regions.
old_reserve_result = old_available;
}
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
assert(old_reserve_result + young_reserve_result <= old_available + young_available,
"Cannot reserve (%zu + %zu + %zu) more than is available: %zu + %zu",
promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available);

// Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
// free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
// the reserve downward to account for this possibility. This loss is part of the reason why the original budget
// was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
if (old_reserve_result >
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
old_reserve_result =
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
}
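// Illustration (not part of this change): a hedged example of the clamp above, using the
// available_in() form of the bound with invented numbers. The reserve can never exceed what the
// OldCollector partition can actually provide: its currently available bytes plus the bytes of
// unaffiliated regions that could still be added to it.
// available_in(OldCollector) = 10 MB, old_unaffiliated_regions = 4, region_size_bytes = 2 MB
// ceiling = 10 MB + 4 * 2 MB = 18 MB
// an old_reserve_result of 22 MB would be lowered to 18 MB here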

if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
@@ -2791,19 +2875,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
ShenandoahFreeSetPartitionId p = _partitions.membership(idx);
size_t ac = alloc_capacity(r);
assert(ac != region_size_bytes, "Empty regions should be in Mutator partition at entry to reserve_regions");
if (p == ShenandoahFreeSetPartitionId::Collector) {
if (ac != region_size_bytes) {
young_used_regions++;
young_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::OldCollector) {
if (ac != region_size_bytes) {
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::NotFree) {
assert(p != ShenandoahFreeSetPartitionId::Collector, "Collector regions must be converted from Mutator regions");
if (p == ShenandoahFreeSetPartitionId::OldCollector) {
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
// This region is within the range for OldCollector partition, as established by find_regions_with_alloc_capacity()
assert((_partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= idx) &&
(_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector) >= idx),
"find_regions_with_alloc_capacity() should have established this is in range");
} else {
assert(p == ShenandoahFreeSetPartitionId::NotFree, "sanity");
// This region has been retired
if (r->is_old()) {
old_used_regions++;
@@ -2813,21 +2895,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
young_used_regions++;
young_used_bytes += region_size_bytes - ac;
}
} else {
assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector");
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
if (idx < old_collector_low_idx) {
old_collector_low_idx = idx;
}
if (idx > old_collector_high_idx) {
old_collector_high_idx = idx;
}
if (idx < old_collector_empty_low_idx) {
old_collector_empty_low_idx = idx;
}
if (idx > old_collector_empty_high_idx) {
old_collector_empty_high_idx = idx;
}
}
}
}
@@ -2856,14 +2923,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector);
}

_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx,
collector_empty_low_idx, collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx, collector_empty_low_idx, collector_empty_high_idx);

_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector,
old_collector_low_idx, old_collector_high_idx,
old_collector_empty_low_idx, old_collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);

recompute_total_used</* UsedByMutatorChanged */ true,
/* UsedByCollectorChanged */ true, /* UsedByOldCollectorChanged */ true>();
@@ -2872,17 +2939,22 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
if (LogTarget(Info, gc, free)::is_enabled()) {
size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
if (old_reserve < to_reserve_old) {
log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode");
reduce_old_reserve(old_reserve, to_reserve_old);
}
size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
if (reserve < to_reserve) {
if (_heap->mode()->is_generational()) {
reduce_young_reserve(reserve, to_reserve);
}
log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
}
}
}