Merge branch 'openjdk:master' into JDK-8376531

Xiaolong Peng 2026-01-27 14:10:25 -08:00 committed by GitHub
commit e6087ad195
377 changed files with 9502 additions and 8553 deletions

View File

@@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
 <li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
 locale</a></li>
 <li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
+<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
 <li><a href="#testing-ahead-of-time-optimizations"
 id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
 Optimizations</a></li>
@@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
 JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
 <p>For more notes about the PKCS11 tests, please refer to
 test/jdk/sun/security/pkcs11/README.</p>
+<h3 id="sctp-tests">SCTP Tests</h3>
+<p>The SCTP tests require the SCTP runtime library, which is often not
+installed by default in popular Linux distributions. Without this
+library, the SCTP tests will be skipped. If you want to enable the SCTP
+tests, you should install the SCTP library before running the tests.</p>
+<p>For distributions using the .deb packaging format and the apt tool
+(such as Debian, Ubuntu, etc.), try this:</p>
+<pre><code>sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp</code></pre>
+<p>For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora, Red Hat, etc.), try this:</p>
+<pre><code>sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp</code></pre>
 <h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
 Optimizations</h3>
 <p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
 For more notes about the PKCS11 tests, please refer to
 test/jdk/sun/security/pkcs11/README.
 
+### SCTP Tests
+
+The SCTP tests require the SCTP runtime library, which is often not installed
+by default in popular Linux distributions. Without this library, the SCTP tests
+will be skipped. If you want to enable the SCTP tests, you should install the
+SCTP library before running the tests.
+
+For distributions using the .deb packaging format and the apt tool
+(such as Debian, Ubuntu, etc.), try this:
+
+```
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora, Red Hat, etc.), try this:
+
+```
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
 ### Testing Ahead-of-time Optimizations
 
 One way to improve test coverage of ahead-of-time (AOT) optimizations in
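
Editor's note: since the suite skips silently when the library is absent, it can help to verify the setup by running the SCTP tests directly after installing the packages above. A minimal sketch, assuming the jtreg tests sit under test/jdk/com/sun/nio/sctp (the path is an assumption, not taken from this change):

```
# Assumed test path; adjust to where the SCTP jtreg tests
# live in your checkout.
make test TEST="jtreg:com/sun/nio/sctp"
```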

View File

@@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
   # Debug prefix mapping if supported by compiler
   DEBUG_PREFIX_CFLAGS=
 
-  UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
-      DEFAULT: "",
-      RESULT: DEBUG_SYMBOLS_LEVEL,
+  UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
+      DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
+      CHECK_AVAILABLE: [
+        if test x$TOOLCHAIN_TYPE = xmicrosoft; then
+          AVAILABLE=false
+        fi
+      ],
       DESC: [set the native debug symbol level (GCC and Clang only)],
-      DEFAULT_DESC: [toolchain default])
-  AC_SUBST(DEBUG_SYMBOLS_LEVEL)
-
-  if test "x${TOOLCHAIN_TYPE}" = xgcc || \
-      test "x${TOOLCHAIN_TYPE}" = xclang; then
-    DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
-    if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
-      DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
-      FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
-          IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
-    fi
-  fi
+      DEFAULT_DESC: [toolchain default],
+      IF_AUTO: [
+        RESULT=""
+      ])
 
   # Debug symbols
   if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
     fi
 
     # Debug info level should follow the debug format to be effective.
-    CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
-    ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+    CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+    ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
   elif test "x$TOOLCHAIN_TYPE" = xclang; then
     if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
       # Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
         IF_FALSE: [GDWARF_FLAGS=""])
 
     # Debug info level should follow the debug format to be effective.
-    CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
-    ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+    CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+    ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     CFLAGS_DEBUG_SYMBOLS="-Z7"
   fi

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
  * The tags can be used as follows:
  *
  * <pre>
- * &commat;jls section-number description
+ * &commat;jls chapter.section description
+ * &commat;jls preview-feature-chapter.section description
  * </pre>
  *
  * For example:
  *
  * <pre>
  * &commat;jls 3.4 Line Terminators
+ * &commat;jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
  * </pre>
  *
  * will produce the following HTML, depending on the file containing
@@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
  * <pre>{@code
  * <dt>See <i>Java Language Specification</i>:
  * <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
+ * <dd><a href="../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
+ * 5.7.1 Exact Testing Conversions</a><sup class="preview-mark">
+ * <a href="../../specs/jls/jls-1.html#jls-1.5.1">PREVIEW</a></sup>
  * }</pre>
  *
- * Copies of JLS and JVMS are expected to have been placed in the {@code specs}
- * folder. These documents are not included in open-source repositories.
+ * In inline tags (note you need manual JLS/JVMS prefix):
+ * <pre>
+ * JLS {&commat;jls 3.4}
+ * </pre>
+ *
+ * produces (note the section sign and no trailing dot):
+ * <pre>
+ * JLS <a href="../../specs/jls/jls-3.html#jls-3.4">§3.4</a>
+ * </pre>
+ *
+ * Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
+ * been placed in the {@code specs} folder. These documents are not included
+ * in open-source repositories.
  */
 public class JSpec implements Taglet {
@@ -87,9 +103,9 @@ public class JSpec implements Taglet {
         }
     }
 
-    private String tagName;
-    private String specTitle;
-    private String idPrefix;
+    private final String tagName;
+    private final String specTitle;
+    private final String idPrefix;
 
     JSpec(String tagName, String specTitle, String idPrefix) {
         this.tagName = tagName;
@@ -98,7 +114,7 @@ public class JSpec implements Taglet {
     }
 
     // Note: Matches special cases like @jvms 6.5.checkcast
-    private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
+    private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
 
     /**
      * Returns the set of locations in which the tag may be used.
@@ -157,19 +173,50 @@ public class JSpec implements Taglet {
                 .trim();
 
         Matcher m = TAG_PATTERN.matcher(tagText);
         if (m.find()) {
+            // preview-feature-4.6 is preview-feature-, 4, .6
+            String preview = m.group("preview"); // null if no preview feature
             String chapter = m.group("chapter");
             String section = m.group("section");
             String rootParent = currentPath().replaceAll("[^/]+", "..");
-            String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
-                    rootParent, idPrefix, chapter, section);
+            String url = preview == null ?
+                    String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+                            rootParent, idPrefix, chapter, section) :
+                    String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
+                            rootParent, idPrefix, chapter, section, preview);
+
+            var literal = expand(contents).trim();
+            var prefix = (preview == null ? "" : preview) + chapter + section;
+            if (literal.startsWith(prefix)) {
+                var hasFullTitle = literal.length() > prefix.length();
+                if (hasFullTitle) {
+                    // Drop the preview identifier
+                    literal = chapter + section + literal.substring(prefix.length());
+                } else {
+                    // No section sign if the tag refers to a chapter, like {@jvms 4}
+                    String sectionSign = section.isEmpty() ? "" : "§";
+                    // Change whole text to "§chapter.x" in inline tags.
+                    literal = sectionSign + chapter + section;
+                }
+            }
+
             sb.append("<a href=\"")
               .append(url)
               .append("\">")
-              .append(expand(contents))
+              .append(literal)
               .append("</a>");
 
+            if (preview != null) {
+                // Add PREVIEW superscript that links to JLS/JVMS 1.5.1
+                // "Restrictions on the Use of Preview Features"
+                // Similar to how APIs link to the Preview info box warning
+                var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+                        rootParent, idPrefix, "1", ".5.1");
+                sb.append("<sup class=\"preview-mark\"><a href=\"")
+                  .append(sectionLink)
+                  .append("\">PREVIEW</a></sup>");
+            }
+
             if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
                 sb.append("<br>");
             }

View File

@@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
     //     return false;
     bind(A_IS_NOT_NULL);
     ldrw(cnt1, Address(a1, length_offset));
+    ldrw(tmp5, Address(a2, length_offset));
+    cmp(cnt1, tmp5);
+    br(NE, DONE); // If lengths differ, return false
     // Increase loop counter by diff between base- and actual start-offset.
     addw(cnt1, cnt1, extra_length);
     lea(a1, Address(a1, start_offset));
@@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
     cbz(a1, DONE);
     ldrw(cnt1, Address(a1, length_offset));
     cbz(a2, DONE);
+    ldrw(tmp5, Address(a2, length_offset));
+    cmp(cnt1, tmp5);
+    br(NE, DONE); // If lengths differ, return false
     // Increase loop counter by diff between base- and actual start-offset.
     addw(cnt1, cnt1, extra_length);

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -412,12 +412,8 @@ run_stub:
 }
 
 void os::Aix::init_thread_fpu_state(void) {
-#if !defined(USE_XLC_BUILTINS)
   // Disable FP exceptions.
   __asm__ __volatile__ ("mtfsfi 6,0");
-#else
-  __mtfsfi(6, 0);
-#endif
 }
 
 ////////////////////////////////////////////////////////////////////////////////

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,21 @@
 // Included in runtime/prefetch.inline.hpp
 
 inline void Prefetch::read(const void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
   __asm__ __volatile__ (
     " dcbt 0, %0 \n"
     :
     : /*%0*/"r" ( ((address)loc) +((long)interval) )
     //:
     );
-#else
-  __dcbt(((address)loc) +((long)interval));
-#endif
 }
 
 inline void Prefetch::write(void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
   __asm__ __volatile__ (
     " dcbtst 0, %0 \n"
     :
     : /*%0*/"r" ( ((address)loc) +((long)interval) )
     //:
     );
-#else
-  __dcbtst( ((address)loc) +((long)interval) );
-#endif
 }
 
 #endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP

View File

@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotGrowableArray.hpp"
+#include "cds/aotMetaspace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/growableArray.hpp"
+
+void AOTGrowableArrayHelper::deallocate(void* mem) {
+  if (!AOTMetaspace::in_aot_cache(mem)) {
+    GrowableArrayCHeapAllocator::deallocate(mem);
+  }
+}

View File

@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
+#define SHARE_AOT_AOTGROWABLEARRAY_HPP
+
+#include <memory/metaspaceClosureType.hpp>
+#include <utilities/growableArray.hpp>
+
+class AOTGrowableArrayHelper {
+public:
+  static void deallocate(void* mem);
+};
+
+// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
+// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
+// MetaspaceClosure. This type should be used for growable arrays that need to be
+// stored in the AOT cache. See ModuleEntry::_reads for an example.
+template <typename E>
+class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
+  friend class VMStructs;
+  friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;
+
+  static E* allocate(int max, MemTag mem_tag) {
+    return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
+  }
+
+  E* allocate() {
+    return allocate(this->_capacity, mtClass);
+  }
+
+  void deallocate(E* mem) {
+#if INCLUDE_CDS
+    AOTGrowableArrayHelper::deallocate(mem);
+#else
+    GrowableArrayCHeapAllocator::deallocate(mem);
+#endif
+  }
+
+public:
+  AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
+      GrowableArrayWithAllocator<E, AOTGrowableArray>(
+          allocate(initial_capacity, mem_tag),
+          initial_capacity) {}
+
+  AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
+
+  // methods required by MetaspaceClosure
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
+  MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
+  static bool is_read_only_by_default() { return false; }
+};
+
+#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP

View File

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
+#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
+
+#include "cds/aotGrowableArray.hpp"
+
+#include "memory/metaspaceClosure.hpp"
+
+template <typename E>
+void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
+}
+
+#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
 #include "cds/aotStreamedHeapWriter.hpp"
 #include "cds/cdsConfig.hpp"
 #include "cds/filemap.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
 #include "logging/log.hpp"
@@ -141,7 +143,7 @@ public:
     info._buffered_addr = ref->obj();
     info._requested_addr = ref->obj();
    info._bytes = ref->size() * BytesPerWord;
-    info._type = ref->msotype();
+    info._type = ref->type();
     _objs.append(info);
   }
@@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
     info._buffered_addr = src_info->buffered_addr();
     info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
     info._bytes = src_info->size_in_bytes();
-    info._type = src_info->msotype();
+    info._type = src_info->type();
     objs.append(info);
   }
@@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
     address buffered_addr = info._buffered_addr;
     address requested_addr = info._requested_addr;
     int bytes = info._bytes;
-    MetaspaceObj::Type type = info._type;
-    const char* type_name = MetaspaceObj::type_name(type);
+    MetaspaceClosureType type = info._type;
+    const char* type_name = MetaspaceClosure::type_name(type);
 
     log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
 
     switch (type) {
-    case MetaspaceObj::ClassType:
+    case MetaspaceClosureType::ClassType:
      log_klass((Klass*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::ConstantPoolType:
+    case MetaspaceClosureType::ConstantPoolType:
      log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::ConstantPoolCacheType:
+    case MetaspaceClosureType::ConstantPoolCacheType:
      log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::ConstMethodType:
+    case MetaspaceClosureType::ConstMethodType:
      log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::MethodType:
+    case MetaspaceClosureType::MethodType:
      log_method((Method*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::MethodCountersType:
+    case MetaspaceClosureType::MethodCountersType:
      log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::MethodDataType:
+    case MetaspaceClosureType::MethodDataType:
      log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::SymbolType:
+    case MetaspaceClosureType::ModuleEntryType:
+      log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
+      break;
+    case MetaspaceClosureType::PackageEntryType:
+      log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
+      break;
+    case MetaspaceClosureType::GrowableArrayType:
+      log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
+      break;
+    case MetaspaceClosureType::SymbolType:
      log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::KlassTrainingDataType:
+    case MetaspaceClosureType::KlassTrainingDataType:
      log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::MethodTrainingDataType:
+    case MetaspaceClosureType::MethodTrainingDataType:
      log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
      break;
-    case MetaspaceObj::CompileTrainingDataType:
+    case MetaspaceClosureType::CompileTrainingDataType:
      log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
      break;
    default:
@@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
   log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
 }
 
+void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
+                                    int bytes, Thread* current) {
+  ResourceMark rm(current);
+  log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
+                      mod->name_as_C_string());
+}
+
+void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
+                                     int bytes, Thread* current) {
+  ResourceMark rm(current);
+  log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
+                      pkg->module()->name_as_C_string(), pkg->name_as_C_string());
+}
+
+void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
+                                      int bytes, Thread* current) {
+  ResourceMark rm(current);
+  log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
+                      arr->length(), arr->capacity());
+}
+
 void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
                              int bytes, Thread* current) {
   ResourceMark rm(current);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "cds/archiveBuilder.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
@@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
 class CompileTrainingData;
 class DumpRegion;
 class FileMapInfo;
+class GrowableArrayBase;
 class KlassTrainingData;
+class MethodCounters;
 class MethodTrainingData;
+class ModuleEntry;
 class outputStream;
+class PackageEntry;
 
 // Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
 // -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
     address _buffered_addr;
     address _requested_addr;
     int _bytes;
-    MetaspaceObj::Type _type;
+    MetaspaceClosureType _type;
   };
 
 public:
@@ -142,6 +147,9 @@ private:
                           Thread* current);
   static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
   static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
+  static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
+  static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
+  static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
   static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
   static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
   static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);

View File

@@ -698,6 +698,9 @@ public:
     Universe::metaspace_pointers_do(it);
     vmSymbols::metaspace_pointers_do(it);
     TrainingData::iterate_roots(it);
+    if (CDSConfig::is_dumping_full_module_graph()) {
+      ClassLoaderDataShared::iterate_roots(it);
+    }
 
     // The above code should find all the symbols that are referenced by the
     // archived classes. We just need to add the extra symbols which
@@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
   _builder.make_klasses_shareable();
   AOTMetaspace::make_method_handle_intrinsics_shareable();
 
+  if (CDSConfig::is_dumping_full_module_graph()) {
+    ClassLoaderDataShared::remove_unshareable_info();
+  }
+
   dump_java_heap_objects();
   dump_shared_symbol_table(_builder.symbols());
@@ -1135,6 +1142,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
   HeapShared::init_heap_writer();
   if (CDSConfig::is_dumping_full_module_graph()) {
     ClassLoaderDataShared::ensure_module_entry_tables_exist();
+    ClassLoaderDataShared::build_tables(CHECK);
     HeapShared::reset_archived_object_states(CHECK);
   }

View File

@@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
   if (get_follow_mode(ref) != make_a_copy) {
     return false;
   }
-  if (ref->msotype() == MetaspaceObj::ClassType) {
+  if (ref->type() == MetaspaceClosureType::ClassType) {
     Klass* klass = (Klass*)ref->obj();
     assert(klass->is_klass(), "must be");
     if (!is_excluded(klass)) {
@@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
       assert(klass->is_instance_klass(), "must be");
     }
   }
-  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
+  } else if (ref->type() == MetaspaceClosureType::SymbolType) {
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
@@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
   aot_log_info(aot)("Gathering classes and symbols ... ");
   GatherKlassesAndSymbols doit(this);
   iterate_roots(&doit);
-#if INCLUDE_CDS_JAVA_HEAP
-  if (CDSConfig::is_dumping_full_module_graph()) {
-    ClassLoaderDataShared::iterate_symbols(&doit);
-  }
-#endif
   doit.finish();
 
   if (CDSConfig::is_dumping_static_archive()) {
@@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
   }
 
 #ifdef ASSERT
-  if (ref->msotype() == MetaspaceObj::MethodType) {
+  if (ref->type() == MetaspaceClosureType::MethodType) {
     Method* m = (Method*)ref->obj();
     assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
            "Should not archive methods in a class that has been regenerated");
   }
 #endif
 
-  if (ref->msotype() == MetaspaceObj::MethodDataType) {
+  if (ref->type() == MetaspaceClosureType::MethodDataType) {
     MethodData* md = (MethodData*)ref->obj();
     md->clean_method_data(false /* always_clean */);
   }
@@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
   if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
     // Don't dump existing shared metadata again.
     return point_to_it;
-  } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
-             ref->msotype() == MetaspaceObj::MethodCountersType ||
-             ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
-             ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
-             ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
+  } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
+             ref->type() == MetaspaceClosureType::MethodCountersType ||
+             ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
+             ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
+             ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
     return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
-  } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
+  } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
     return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
   } else {
-    if (ref->msotype() == MetaspaceObj::ClassType) {
+    if (ref->type() == MetaspaceClosureType::ClassType) {
       Klass* klass = (Klass*)ref->obj();
       assert(klass->is_klass(), "must be");
       if (RegeneratedClasses::has_been_regenerated(klass)) {
@@ -571,7 +566,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
       }
       if (is_excluded(klass)) {
         ResourceMark rm;
-        log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
+        aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
+        return set_to_null;
+      }
+      if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
+        ResourceMark rm;
+        aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
         return set_to_null;
       }
     }
@@ -615,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
   ResourceMark rm;
   aot_log_info(aot)("Allocating RW objects ... ");
   make_shallow_copies(&_rw_region, &_rw_src_objs);
-#if INCLUDE_CDS_JAVA_HEAP
-  if (CDSConfig::is_dumping_full_module_graph()) {
-    // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
-    char* start = rw_region()->top();
-    ClassLoaderDataShared::allocate_archived_tables();
-    alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
-  }
-#endif
 }
 
 void ArchiveBuilder::dump_ro_metadata() {
@@ -632,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {
   start_dump_region(&_ro_region);
   make_shallow_copies(&_ro_region, &_ro_src_objs);
 
-#if INCLUDE_CDS_JAVA_HEAP
-  if (CDSConfig::is_dumping_full_module_graph()) {
-    char* start = ro_region()->top();
-    ClassLoaderDataShared::init_archived_tables();
-    alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
-  }
-#endif
-
   RegeneratedClasses::record_regenerated_objects();
 }
@@ -658,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
   size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
   char* oldtop = dump_region->top();
 
-  if (src_info->msotype() == MetaspaceObj::ClassType) {
+  if (src_info->type() == MetaspaceClosureType::ClassType) {
     // Allocate space for a pointer directly in front of the future InstanceKlass, so
     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
     // without building another hashtable. See RunTimeClassInfo::get_for()
@@ -674,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
      alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
    }
 #endif
-  } else if (src_info->msotype() == MetaspaceObj::SymbolType) {
+  } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
    // Symbols may be allocated by using AllocateHeap, so their sizes
    // may be less than size_in_bytes() indicates.
    bytes = ((Symbol*)src)->byte_size();
@@ -684,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
   memcpy(dest, src, bytes);
 
   // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
-  if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
+  if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
    Symbol* buffered_symbol = (Symbol*)dest;
    assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
    buffered_symbol->update_identity_hash();
@@ -699,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
     }
   }
 
-  intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
+  intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
   if (archived_vtable != nullptr) {
     *(address*)dest = (address)archived_vtable;
     ArchivePtrMarker::mark_pointer((address*)dest);
@@ -709,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
   src_info->set_buffered_addr((address)dest);
 
   char* newtop = dump_region->top();
-  _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
+  _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
   DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
 }
@@ -992,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
      return;
    }
 
-    if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
-        info.msotype() == MetaspaceObj::MethodTrainingDataType ||
-        info.msotype() == MetaspaceObj::CompileTrainingDataType) {
+    if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
+        info.type() == MetaspaceClosureType::MethodTrainingDataType ||
+        info.type() == MetaspaceClosureType::CompileTrainingDataType) {
      TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
      buffered_td->remove_unshareable_info();
-    } else if (info.msotype() == MetaspaceObj::MethodDataType) {
+    } else if (info.type() == MetaspaceClosureType::MethodDataType) {
      MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
      buffered_mdo->remove_unshareable_info();
-    } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
+    } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
      MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
      buffered_mc->remove_unshareable_info();
    }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -134,13 +134,13 @@ private:
     int _size_in_bytes;
     int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
              // when the object is added into _source_objs.
-    MetaspaceObj::Type _msotype;
+    MetaspaceClosureType _type;
     address _source_addr;   // The source object to be copied.
     address _buffered_addr; // The copy of this object insider the buffer.
 
 public:
     SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
       _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
-      _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
+      _size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
       _source_addr(ref->obj()) {
       if (follow_mode == point_to_it) {
        _buffered_addr = ref->obj();
@@ -155,7 +155,7 @@ private:
     SourceObjInfo(address src, SourceObjInfo* renegerated_obj_info) :
       _ptrmap_start(0), _ptrmap_end(0), _read_only(false),
       _follow_mode(renegerated_obj_info->_follow_mode),
-      _size_in_bytes(0), _msotype(renegerated_obj_info->_msotype),
+      _size_in_bytes(0), _type(renegerated_obj_info->_type),
       _source_addr(src), _buffered_addr(renegerated_obj_info->_buffered_addr) {}
 
     bool should_copy() const { return _follow_mode == make_a_copy; }
@@ -182,7 +182,7 @@ private:
      }
      return _buffered_addr;
    }
-    MetaspaceObj::Type msotype() const { return _msotype; }
+    MetaspaceClosureType type() const { return _type; }
    FollowMode follow_mode() const { return _follow_mode; }
  };

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,14 @@
  *
  */
 
+#include "cds/aotGrowableArray.hpp"
 #include "cds/aotMetaspace.hpp"
 #include "cds/archiveBuilder.hpp"
 #include "cds/archiveUtils.hpp"
 #include "cds/cdsConfig.hpp"
 #include "cds/cppVtables.hpp"
 #include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
@@ -53,6 +55,19 @@
 // + at run time:  we clone the actual contents of the vtables from libjvm.so
 //   into our own tables.
 
+#ifndef PRODUCT
+// AOTGrowableArray has a vtable only when in non-product builds (due to
+// the virtual printing functions in AnyObj).
+using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
+
+#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
+  f(GrowableArray_ModuleEntry_ptr) \
+
+#endif
+
 // Currently, the archive contains ONLY the following types of objects that have C++ vtables.
 #define CPP_VTABLE_TYPES_DO(f) \
   f(ConstantPool) \
@@ -68,7 +83,8 @@
   f(TypeArrayKlass) \
   f(KlassTrainingData) \
   f(MethodTrainingData) \
-  f(CompileTrainingData)
+  f(CompileTrainingData) \
+  NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
 
 class CppVtableInfo {
   intptr_t _vtable_size;
@@ -86,7 +102,7 @@ public:
   }
 };
 
-static inline intptr_t* vtable_of(const Metadata* m) {
+static inline intptr_t* vtable_of(const void* m) {
   return *((intptr_t**)m);
 }
@@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
 template <class T>
 void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
+  ResourceMark rm;
   T tmp;  // Allocate temporary dummy metadata object to get to the original vtable.
   int n = info->vtable_size();
   intptr_t* srcvtable = vtable_of(&tmp);
@@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
   }
 }
 
-intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
+intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
   if (!_orig_cpp_vtptrs_inited) {
     CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
     _orig_cpp_vtptrs_inited = true;
@@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
   assert(CDSConfig::is_dumping_archive(), "sanity");
 
   int kind = -1;
-  switch (msotype) {
-  case MetaspaceObj::SymbolType:
-  case MetaspaceObj::TypeArrayU1Type:
-  case MetaspaceObj::TypeArrayU2Type:
-  case MetaspaceObj::TypeArrayU4Type:
-  case MetaspaceObj::TypeArrayU8Type:
-  case MetaspaceObj::TypeArrayOtherType:
-  case MetaspaceObj::ConstMethodType:
-  case MetaspaceObj::ConstantPoolCacheType:
-  case MetaspaceObj::AnnotationsType:
-  case MetaspaceObj::RecordComponentType:
-  case MetaspaceObj::AdapterHandlerEntryType:
-  case MetaspaceObj::AdapterFingerPrintType:
+  switch (type) {
+  case MetaspaceClosureType::SymbolType:
+  case MetaspaceClosureType::TypeArrayU1Type:
+  case MetaspaceClosureType::TypeArrayU2Type:
+  case MetaspaceClosureType::TypeArrayU4Type:
+  case MetaspaceClosureType::TypeArrayU8Type:
+  case MetaspaceClosureType::TypeArrayOtherType:
+  case MetaspaceClosureType::CArrayType:
+  case MetaspaceClosureType::ConstMethodType:
+  case MetaspaceClosureType::ConstantPoolCacheType:
+  case MetaspaceClosureType::AnnotationsType:
+  case MetaspaceClosureType::ModuleEntryType:
+  case MetaspaceClosureType::PackageEntryType:
+  case MetaspaceClosureType::RecordComponentType:
+  case MetaspaceClosureType::AdapterHandlerEntryType:
+  case MetaspaceClosureType::AdapterFingerPrintType:
+  PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
     // These have no vtables.
     break;
   default:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class ArchiveBuilder;
@@ -40,7 +41,7 @@ class CppVtables : AllStatic {
 public:
   static void dumptime_init(ArchiveBuilder* builder);
   static void zero_archived_vtables();
-  static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
+  static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
   static void serialize(SerializeClosure* sc);
   static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
   static char* vtables_serialized_base() { return _vtables_serialized_base; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,32 +27,34 @@
 #include "classfile/compactHashtable.hpp"
 #include "memory/allocation.hpp"
+#include "memory/metaspaceClosureType.hpp"

 // This is for dumping detailed statistics for the allocations
 // in the shared spaces.
 class DumpAllocStats : public StackObj {
 public:
-  // Here's poor man's enum inheritance
-#define SHAREDSPACE_OBJ_TYPES_DO(f) \
-  METASPACE_OBJ_TYPES_DO(f) \
+#define DUMPED_OBJ_TYPES_DO(f) \
+  METASPACE_CLOSURE_TYPES_DO(f) \
   f(SymbolHashentry) \
   f(SymbolBucket) \
   f(StringHashentry) \
   f(StringBucket) \
-  f(ModulesNatives) \
   f(CppVTables) \
   f(Other)

+#define DUMPED_TYPE_DECLARE(name) name ## Type,
+#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
   enum Type {
     // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
-    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+    DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
     _number_of_types
   };

   static const char* type_name(Type type) {
     switch(type) {
-    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+    DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
     default:
       ShouldNotReachHere();
       return nullptr;
@@ -101,16 +103,12 @@ public:
   CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
   CompactHashtableStats* string_stats() { return &_string_stats; }

-  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
-    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+  void record(MetaspaceClosureType type, int byte_size, bool read_only) {
+    int t = (int)type;
+    assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
     int which = (read_only) ? RO : RW;
-    _counts[which][type] ++;
-    _bytes [which][type] += byte_size;
-  }
-
-  void record_modules(int byte_size, bool read_only) {
-    int which = (read_only) ? RO : RW;
-    _bytes [which][ModulesNativesType] += byte_size;
+    _counts[which][t] ++;
+    _bytes [which][t] += byte_size;
   }

   void record_other_type(int byte_size, bool read_only) {
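
The DUMPED_OBJ_TYPES_DO / DUMPED_TYPE_DECLARE pair above uses the X-macro idiom: one list macro is expanded with different per-item macros so the enum and its name table can never drift apart. A minimal, self-contained sketch of the idiom (the type names below are invented for illustration, not the HotSpot definitions):

```cpp
#include <cstdio>

// One list macro enumerates the items...
#define MY_TYPES_DO(f) \
  f(Symbol)            \
  f(String)            \
  f(Other)

// ...and per-item macros expand that list in different contexts.
#define DECLARE_TYPE(name) name##Type,
#define NAME_CASE(name) case name##Type: return #name;

enum Type {
  MY_TYPES_DO(DECLARE_TYPE)   // SymbolType, StringType, OtherType,
  _number_of_types
};

static const char* type_name(Type t) {
  switch (t) {
    MY_TYPES_DO(NAME_CASE)    // case SymbolType: return "Symbol"; ...
    default: return "unknown";
  }
}

int main() {
  // Enum and name table stay in sync because both come from MY_TYPES_DO.
  printf("%d types; first is %s\n", (int)_number_of_types, type_name(SymbolType));
  return 0;
}
```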

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -948,10 +948,6 @@ void HeapShared::archive_subgraphs() {
                       true /* is_full_module_graph */);
     }
   }
-
-  if (CDSConfig::is_dumping_full_module_graph()) {
-    Modules::verify_archived_modules();
-  }
 }

 //

View File

@@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
 static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
   if (holder == nullptr)
     return false;
+  if (holder->trust_final_fields()) {
+    // Explicit opt-in from system classes
+    return true;
+  }
   // Even if general trusting is disabled, trust system-built closures in these packages.
   if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
       holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
   // Trust final fields in records
   if (holder->is_record())
     return true;
-  // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
-  // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
-  if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
-      holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
-      holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
-      holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
-    return true;
-  }
   return TrustFinalNonStaticFields;
 }

View File

@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
   _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
   _is_hidden = ik->is_hidden();
   _is_record = ik->is_record();
+  _trust_final_fields = ik->trust_final_fields();
   _nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
   _has_injected_fields = -1;
   _implementor = nullptr; // we will fill these lazily

View File

@@ -59,6 +59,7 @@ private:
   bool _has_nonstatic_concrete_methods;
   bool _is_hidden;
   bool _is_record;
+  bool _trust_final_fields;
   bool _has_trusted_loader;
   ciFlags _flags;
@@ -207,6 +208,10 @@ public:
     return _is_record;
   }
+  bool trust_final_fields() const {
+    return _trust_final_fields;
+  }
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);

View File

@@ -943,6 +943,7 @@ public:
     _java_lang_Deprecated_for_removal,
     _jdk_internal_vm_annotation_AOTSafeClassInitializer,
     _method_AOTRuntimeSetup,
+    _jdk_internal_vm_annotation_TrustFinalFields,
     _annotation_LIMIT
   };
   const Location _location;
@@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
       if (!privileged) break; // only allow in privileged code
       return _field_Stable;
     }
+    case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
+      if (_location != _in_class) break; // only allow for classes
+      if (!privileged) break; // only allow in privileged code
+      return _jdk_internal_vm_annotation_TrustFinalFields;
+    }
     case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
       if (_location != _in_field && _location != _in_class) {
         break; // only allow for fields and classes
@@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
   if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
     ik->set_has_aot_safe_initializer();
   }
+  if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
+    ik->set_trust_final_fields(true);
+  }
 }

 #define MAX_ARGS_SIZE 255
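
The parser-side wiring above follows a common pattern: a privileged class-level annotation flips a flag on the klass, and the compiler interface later reads that flag (see the ciField/ciInstanceKlass hunks earlier). A hedged sketch of that flow with invented stand-in types; only set_trust_final_fields/trust_final_fields appear in the diff, everything else below is illustrative:

```cpp
#include <cstdio>
#include <cstring>

struct Klass {
  bool trust_final_fields = false;  // stand-in for the InstanceKlass flag
};

void apply_annotation(Klass* k, const char* signature, bool privileged) {
  // Only classes in privileged (boot/platform) code may opt in.
  if (privileged &&
      strcmp(signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") == 0) {
    k->trust_final_fields = true;
  }
}

bool can_constant_fold_final(const Klass* holder) {
  return holder->trust_final_fields;  // explicit opt-in wins
}

int main() {
  Klass k;
  apply_annotation(&k, "Ljdk/internal/vm/annotation/TrustFinalFields;",
                   /*privileged=*/true);
  printf("fold finals: %s\n", can_constant_fold_final(&k) ? "yes" : "no");
  return 0;
}
```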

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "classfile/packageEntry.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "logging/log.hpp"
+#include "memory/metaspaceClosure.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
 public:
   ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}

-  void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
-  void allocate(ClassLoaderData* loader_data);
-  void init_archived_entries(ClassLoaderData* loader_data);
+  void iterate_roots(MetaspaceClosure* closure);
+  void build_tables(ClassLoaderData* loader_data, TRAPS);
+  void remove_unshareable_info();

   ModuleEntry* unnamed_module() {
     return _unnamed_module;
   }
@@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
 static int _platform_loader_root_index = -1;
 static int _system_loader_root_index = -1;

-void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
+void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
   assert(CDSConfig::is_dumping_full_module_graph(), "must be");
-  assert_valid(loader_data);
-  if (loader_data != nullptr) {
-    loader_data->packages()->iterate_symbols(closure);
-    loader_data->modules() ->iterate_symbols(closure);
-    loader_data->unnamed_module()->iterate_symbols(closure);
-  }
+  it->push(&_packages);
+  it->push(&_modules);
+  it->push(&_unnamed_module);
 }

-void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
+void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
   assert(CDSConfig::is_dumping_full_module_graph(), "must be");
   assert_valid(loader_data);
   if (loader_data != nullptr) {
@@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
     // address of the Symbols, which may be relocated at runtime due to ASLR.
     // So we store the packages/modules in Arrays. At runtime, we create
     // the hashtables using these arrays.
-    _packages = loader_data->packages()->allocate_archived_entries();
-    _modules  = loader_data->modules() ->allocate_archived_entries();
-    _unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
+    _packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
+    _modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
+    _unnamed_module = loader_data->unnamed_module();
   }
 }

-void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
-  assert(CDSConfig::is_dumping_full_module_graph(), "must be");
-  assert_valid(loader_data);
-  if (loader_data != nullptr) {
-    loader_data->packages()->init_archived_entries(_packages);
-    loader_data->modules() ->init_archived_entries(_modules);
-    _unnamed_module->init_as_archived_entry();
+void ArchivedClassLoaderData::remove_unshareable_info() {
+  if (_packages != nullptr) {
+    _packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
+    for (int i = 0; i < _packages->length(); i++) {
+      _packages->at(i)->remove_unshareable_info();
+    }
+  }
+  if (_modules != nullptr) {
+    _modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
+    for (int i = 0; i < _modules->length(); i++) {
+      _modules->at(i)->remove_unshareable_info();
+    }
+  }
+  if (_unnamed_module != nullptr) {
+    _unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
+    _unnamed_module->remove_unshareable_info();
   }
 }

@@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
 // ------------------------------

 void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
-#if INCLUDE_CDS_JAVA_HEAP
   // The streaming object loader prefers loading the class loader related objects before
   // the CLD constructor which has a NoSafepointVerifier.
   if (!HeapShared::is_loading_streaming_mode()) {
@@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
   if (system_loader_module_entry != nullptr) {
     system_loader_module_entry->preload_archived_oops();
   }
-#endif
 }

 static ClassLoaderData* null_class_loader_data() {
@@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
   assert(met != nullptr, "sanity");
 }

-void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
+void ClassLoaderDataShared::build_tables(TRAPS) {
   assert(CDSConfig::is_dumping_full_module_graph(), "must be");
-  _archived_boot_loader_data.iterate_symbols    (null_class_loader_data(), closure);
-  _archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
-  _archived_system_loader_data.iterate_symbols  (java_system_loader_data_or_null(), closure);
+  _archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
+  _archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
+  _archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
 }

-void ClassLoaderDataShared::allocate_archived_tables() {
+void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
   assert(CDSConfig::is_dumping_full_module_graph(), "must be");
-  _archived_boot_loader_data.allocate    (null_class_loader_data());
-  _archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
-  _archived_system_loader_data.allocate  (java_system_loader_data_or_null());
+  _archived_boot_loader_data.iterate_roots(it);
+  _archived_platform_loader_data.iterate_roots(it);
+  _archived_system_loader_data.iterate_roots(it);
 }

-void ClassLoaderDataShared::init_archived_tables() {
+void ClassLoaderDataShared::remove_unshareable_info() {
   assert(CDSConfig::is_dumping_full_module_graph(), "must be");
+  _archived_boot_loader_data.remove_unshareable_info();
+  _archived_platform_loader_data.remove_unshareable_info();
+  _archived_system_loader_data.remove_unshareable_info();

-  _archived_boot_loader_data.init_archived_entries    (null_class_loader_data());
-  _archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
-  _archived_system_loader_data.init_archived_entries  (java_system_loader_data_or_null());
-  _archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
+  _archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());

   _platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
   _system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
   return archived_module;
 }

 void ClassLoaderDataShared::clear_archived_oops() {
   assert(!CDSConfig::is_using_full_module_graph(), "must be");
   _archived_boot_loader_data.clear_archived_oops();
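
Read together, the renamings suggest a three-phase dump pipeline: build_tables runs while allocation (and exception propagation via TRAPS/CHECK) is still possible, iterate_roots hands the roots to the builder for copying, and remove_unshareable_info strips runtime-only state from the buffered copies. A schematic sketch of that ordering only; the types and bodies below are invented for illustration:

```cpp
#include <cstdio>

struct Builder { /* copies metadata into the archive buffer */ };

struct ArchivedLoaderData {
  void build_tables()            { printf("1. build AOT tables\n"); }
  void iterate_roots(Builder*)   { printf("2. push roots to builder\n"); }
  void remove_unshareable_info() { printf("3. clear runtime-only state\n"); }
};

int main() {
  Builder b;
  ArchivedLoaderData d;
  d.build_tables();             // may allocate and throw; runs first
  d.iterate_roots(&b);          // builder copies reachable metadata
  d.remove_unshareable_info();  // applied to the buffered copies
  return 0;
}
```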

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
 public:
   static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
   static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
+  static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+  static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
+  static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
 #if INCLUDE_CDS_JAVA_HEAP
   static void ensure_module_entry_tables_exist();
-  static void allocate_archived_tables();
-  static void iterate_symbols(MetaspaceClosure* closure);
-  static void init_archived_tables();
   static void serialize(SerializeClosure* f);
   static void clear_archived_oops();
   static void restore_archived_entries_for_null_class_loader_data();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */

 #include "cds/aotClassLocation.hpp"
+#include "cds/aotGrowableArray.inline.hpp"
 #include "cds/archiveBuilder.hpp"
 #include "cds/archiveUtils.hpp"
 #include "cds/cdsConfig.hpp"
@@ -37,6 +38,7 @@
 #include "jni.h"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/oopHandle.inline.hpp"
@@ -44,7 +46,6 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
 #include "utilities/hashTable.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/quickSort.hpp"
@@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
   } else {
     if (reads() == nullptr) {
       // Lazily create a module's reads list
-      GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
+      AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
       set_reads(new_reads);
     }
@@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
   _has_default_read_edges(false),
   _must_walk_reads(false),
   _is_open(is_open),
-  _is_patched(false)
-  DEBUG_ONLY(COMMA _reads_is_archived(false)) {
+  _is_patched(false) {

   // Initialize fields specific to a ModuleEntry
   if (_name == nullptr) {
@@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
   ModuleEntryTableDeleter deleter;
   _table.unlink(&deleter);
   assert(_table.number_of_entries() == 0, "should have removed all entries");
 }

 void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
   _loader_data = cld;
 }

+void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_name);
+  it->push(&_reads);
+  it->push(&_version);
+  it->push(&_location);
+}
+
 #if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
-  const ModuleEntry*,
-  ModuleEntry*,
-  557, // prime number
-  AnyObj::C_HEAP> ArchivedModuleEntries;
-static ArchivedModuleEntries* _archive_modules_entries = nullptr;
-
-#ifndef PRODUCT
-static int _num_archived_module_entries = 0;
-static int _num_inited_module_entries = 0;
-#endif
-
 bool ModuleEntry::should_be_archived() const {
   return SystemDictionaryShared::is_builtin_loader(loader_data());
 }

-ModuleEntry* ModuleEntry::allocate_archived_entry() const {
-  precond(should_be_archived());
-  precond(CDSConfig::is_dumping_full_module_graph());
-  ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
-  memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
-  archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
+void ModuleEntry::remove_unshareable_info() {
+  _archived_module_index = HeapShared::append_root(module_oop());

-  if (_archive_modules_entries == nullptr) {
-    _archive_modules_entries = new (mtClass)ArchivedModuleEntries();
-  }
-  assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
-  _archive_modules_entries->put(this, archived_entry);
-  DEBUG_ONLY(_num_archived_module_entries++);
-
-  if (CDSConfig::is_dumping_final_static_archive()) {
-    OopHandle null_handle;
-    archived_entry->_shared_pd = null_handle;
-  } else {
-    assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+  if (_reads != nullptr) {
+    _reads->set_in_aot_cache();
   }

   // Clear handles and restore at run time. Handles cannot be archived.
+  if (CDSConfig::is_dumping_final_static_archive()) {
+    OopHandle null_handle;
+    _shared_pd = null_handle;
+  } else {
+    assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+  }
+
   OopHandle null_handle;
-  archived_entry->_module_handle = null_handle;
-
-  // For verify_archived_module_entries()
-  DEBUG_ONLY(_num_inited_module_entries++);
-
-  if (log_is_enabled(Info, aot, module)) {
-    ResourceMark rm;
-    LogStream ls(Log(aot, module)::info());
-    ls.print("Stored in archive: ");
-    archived_entry->print(&ls);
-  }
-  return archived_entry;
-}
-
-bool ModuleEntry::has_been_archived() {
-  assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
-  return _archive_modules_entries->contains(this);
-}
-
-ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
-  ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
-  assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
-  return *ptr;
-}
-
-// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
-// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
-// Write it out as an Array, and convert it back to GrowableArray at runtime.
-Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
-  Array<ModuleEntry*>* archived_array = nullptr;
-  int length = (array == nullptr) ? 0 : array->length();
-  if (length > 0) {
-    archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
-    for (int i = 0; i < length; i++) {
-      ModuleEntry* archived_entry = get_archived_entry(array->at(i));
-      archived_array->at_put(i, archived_entry);
-      ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
-    }
-  }
-  return archived_array;
-}
-
-GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
-  GrowableArray<ModuleEntry*>* array = nullptr;
-  int length = (archived_array == nullptr) ? 0 : archived_array->length();
-  if (length > 0) {
-    array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
-    for (int i = 0; i < length; i++) {
-      ModuleEntry* archived_entry = archived_array->at(i);
-      array->append(archived_entry);
-    }
-  }
-  return array;
-}
-
-void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
-  closure->push(&_name);
-  closure->push(&_version);
-  closure->push(&_location);
-}
-
-void ModuleEntry::init_as_archived_entry() {
-  set_archived_reads(write_growable_array(reads()));
+  _module_handle = null_handle;
+
   _loader_data = nullptr; // re-init at runtime

   if (name() != nullptr) {
-    _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
-    _name = ArchiveBuilder::get_buffered_symbol(_name);
-    ArchivePtrMarker::mark_pointer((address*)&_name);
+    Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
+    _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
   } else {
     // _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
     // for checking classes in named modules.
     _shared_path_index = -1;
   }

-  if (_version != nullptr) {
-    _version = ArchiveBuilder::get_buffered_symbol(_version);
-  }
-  if (_location != nullptr) {
-    _location = ArchiveBuilder::get_buffered_symbol(_location);
-  }
   JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
-  ArchivePtrMarker::mark_pointer((address*)&_reads);
-  ArchivePtrMarker::mark_pointer((address*)&_version);
-  ArchivePtrMarker::mark_pointer((address*)&_location);
 }

-#ifndef PRODUCT
-void ModuleEntry::verify_archived_module_entries() {
-  assert(_num_archived_module_entries == _num_inited_module_entries,
-         "%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
-         _num_archived_module_entries, _num_inited_module_entries);
-}
-#endif // PRODUCT
-
 void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
   assert(CDSConfig::is_using_archive(), "runtime only");
   set_loader_data(loader_data);
-  set_reads(restore_growable_array(archived_reads()));
   JFR_ONLY(INIT_ID(this);)
 }

@@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
   return a->name()->fast_compare(b->name());
 }

-void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
-  auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
-    m->iterate_symbols(closure);
-  };
-  _table.iterate_all(syms);
-}
-
-Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
-  Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
+Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
+  Array<ModuleEntry*>* aot_table =
+    MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
   int n = 0;
   auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
-    archived_modules->at_put(n++, m);
+    m->pack_reads();
+    aot_table->at_put(n++, m);
+    if (log_is_enabled(Info, aot, module)) {
+      ResourceMark rm;
+      LogStream ls(Log(aot, module)::info());
+      ls.print("Stored in archive: ");
+      m->print(&ls);
+    }
   };
   _table.iterate_all(grab);

   if (n > 1) {
     // Always allocate in the same order to produce deterministic archive.
-    QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
+    QuickSort::sort(aot_table->data(), n, compare_module_by_name);
   }
-  for (int i = 0; i < n; i++) {
-    archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
-    ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
-  }
-  return archived_modules;
-}
-
-void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
-  assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
-  for (int i = 0; i < archived_modules->length(); i++) {
-    ModuleEntry* archived_entry = archived_modules->at(i);
-    archived_entry->init_as_archived_entry();
-  }
+  return aot_table;
 }

 void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,
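
The removed write_growable_array/restore_growable_array round-trip is replaced by archiving the AOTGrowableArray in place, after pack_reads() has called shrink_to_fit(). AOTGrowableArray is HotSpot-internal; the sketch below only models the core idea that dropping slack capacity leaves a payload whose capacity equals its length, so it can be copied into a read-mostly archive verbatim:

```cpp
#include <cstdio>
#include <cstdlib>

struct PackableArray {
  int* data;
  int len;
  int cap;
  void append(int v) {
    if (len == cap) {                 // grow geometrically while mutable
      cap = cap ? cap * 2 : 4;
      data = (int*)realloc(data, cap * sizeof(int));
    }
    data[len++] = v;
  }
  void shrink_to_fit() {              // "pack": capacity == length
    data = (int*)realloc(data, len * sizeof(int));
    cap = len;
  }
};

int main() {
  PackableArray a{nullptr, 0, 0};
  for (int i = 0; i < 5; i++) a.append(i);
  a.shrink_to_fit();
  printf("len=%d cap=%d\n", a.len, a.cap);  // no slack left to archive
  free(a.data);
  return 0;
}
```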

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
 #ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
 #define SHARE_CLASSFILE_MODULEENTRY_HPP

+#include "cds/aotGrowableArray.hpp"
 #include "jni.h"
+#include "memory/metaspaceClosureType.hpp"
 #include "oops/oopHandle.hpp"
 #include "oops/symbol.hpp"
 #include "oops/symbolHandle.hpp"
@@ -68,11 +70,8 @@ private:
                                // for shared classes from this module
   Symbol* _name;               // name of this module
   ClassLoaderData* _loader_data;
-  union {
-    GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
-    Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
-  };
+  AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
   Symbol* _version;            // module version number
   Symbol* _location;           // module location
   CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@@ -81,7 +80,6 @@ private:
   bool _must_walk_reads;       // walk module's reads list at GC safepoints to purge out dead modules
   bool _is_open;               // whether the packages in the module are all unqualifiedly exported
   bool _is_patched;            // whether the module is patched via --patch-module
-  DEBUG_ONLY(bool _reads_is_archived);
   CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
   JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@@ -120,22 +118,18 @@ public:
   bool can_read(ModuleEntry* m) const;
   bool has_reads_list() const;
-  GrowableArray<ModuleEntry*>* reads() const {
-    assert(!_reads_is_archived, "sanity");
+  AOTGrowableArray<ModuleEntry*>* reads() const {
     return _reads;
   }
-  void set_reads(GrowableArray<ModuleEntry*>* r) {
+  void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
     _reads = r;
-    DEBUG_ONLY(_reads_is_archived = false);
   }
-  Array<ModuleEntry*>* archived_reads() const {
-    assert(_reads_is_archived, "sanity");
-    return _archived_reads;
-  }
-  void set_archived_reads(Array<ModuleEntry*>* r) {
-    _archived_reads = r;
-    DEBUG_ONLY(_reads_is_archived = true);
+  void pack_reads() {
+    if (_reads != nullptr) {
+      _reads->shrink_to_fit();
+    }
   }
   void add_read(ModuleEntry* m);
   void set_read_walk_required(ClassLoaderData* m_loader_data);
@@ -189,6 +183,13 @@ public:
   const char* name_as_C_string() const {
     return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
   }

+  // methods required by MetaspaceClosure
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
+  MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
+  static bool is_read_only_by_default() { return false; }
+
   void print(outputStream* st = tty) const;
   void verify();
@@ -198,18 +199,11 @@ public:
 #if INCLUDE_CDS_JAVA_HEAP
   bool should_be_archived() const;
-  void iterate_symbols(MetaspaceClosure* closure);
-  ModuleEntry* allocate_archived_entry() const;
-  void init_as_archived_entry();
-  static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
-  bool has_been_archived();
-  static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
-  static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
+  void remove_unshareable_info();
   void load_from_archive(ClassLoaderData* loader_data);
   void preload_archived_oops();
   void restore_archived_oops(ClassLoaderData* loader_data);
   void clear_archived_oops();
-  static void verify_archived_module_entries() PRODUCT_RETURN;
 #endif
 };

@@ -275,9 +269,7 @@ public:
   void verify();

 #if INCLUDE_CDS_JAVA_HEAP
-  void iterate_symbols(MetaspaceClosure* closure);
-  Array<ModuleEntry*>* allocate_archived_entries();
-  void init_archived_entries(Array<ModuleEntry*>* archived_modules);
+  Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
   void load_archived_entries(ClassLoaderData* loader_data,
                              Array<ModuleEntry*>* archived_modules);
   void restore_archived_oops(ClassLoaderData* loader_data,
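
The block commented "methods required by MetaspaceClosure" is a duck-typed protocol: the closure's templated push() works against any type supplying these members. A hedged sketch of how such a visitor recurses; the type() member is omitted for brevity, and every name below is invented for the example, not the HotSpot implementation:

```cpp
#include <cstdio>

struct MetaspaceClosure {
  template <typename T>
  void push(T** ref) {
    if (*ref == nullptr) return;
    printf("visit %d heapwords, read-only=%d\n",
           (*ref)->size_in_heapwords(), (int)T::is_read_only_by_default());
    (*ref)->metaspace_pointers_do(this);  // recurse into embedded pointers
  }
};

struct Leaf {
  void metaspace_pointers_do(MetaspaceClosure*) {}  // no embedded pointers
  int size_in_heapwords() const { return 1; }
  static bool is_read_only_by_default() { return true; }
};

struct Node {
  Leaf* _leaf = nullptr;
  void metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_leaf); }
  int size_in_heapwords() const { return 2; }
  static bool is_read_only_by_default() { return false; }
};

int main() {
  Leaf l;
  Node n;
  n._leaf = &l;
  MetaspaceClosure it;
  Node* root = &n;
  it.push(&root);  // visits Node, then recurses into Leaf
  return 0;
}
```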

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
   ClassLoaderData* loader_data = orig_module_ent->loader_data();
   assert(loader_data->is_builtin_class_loader_data(), "must be");

-  if (orig_module_ent->name() != nullptr) {
-    // For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
-    assert(orig_module_ent->has_been_archived(), "sanity");
-  } else {
+  precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
+  if (orig_module_ent->name() == nullptr) {
     // We always archive unnamed module oop for boot, platform, and system loaders.
     precond(orig_module_ent->should_be_archived());
-    precond(orig_module_ent->has_been_archived());

     if (loader_data->is_boot_class_loader_data()) {
       assert(!_seen_boot_unnamed_module, "only once");
@@ -529,10 +526,6 @@
   }
 }

-void Modules::verify_archived_modules() {
-  ModuleEntry::verify_archived_module_entries();
-}
-
 class Modules::ArchivedProperty {
   const char* _prop;
   const bool _numbered;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@ public:
                                     TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
                                     NOT_CDS_JAVA_HEAP_RETURN;
-  static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
   static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
   static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,8 @@
  *
  */

+#include "cds/aotGrowableArray.inline.hpp"
+#include "cds/aotMetaspace.hpp"
 #include "cds/archiveBuilder.hpp"
 #include "cds/archiveUtils.hpp"
 #include "cds/cdsConfig.hpp"
@@ -31,13 +33,13 @@
 #include "classfile/vmSymbols.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/array.hpp"
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
 #include "utilities/hashTable.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/quickSort.hpp"
@@ -51,7 +53,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
   _qualified_exports(nullptr),
   _defined_by_cds_in_class_path(0)
 {
-  // name can't be null
+  // name can't be null -- a class in the default package gets a PackageEntry of nullptr.
   _name->increment_refcount();

   JFR_ONLY(INIT_ID(this);)
@@ -81,7 +83,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
   if (!has_qual_exports_list()) {
     // Lazily create a package's qualified exports list.
     // Initial size is small, do not anticipate export lists to be large.
-    _qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
+    _qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
   }

   // Determine, based on this newly established export to module m,
@@ -183,12 +185,24 @@ void PackageEntry::purge_qualified_exports() {
 }

 void PackageEntry::delete_qualified_exports() {
-  if (_qualified_exports != nullptr) {
+  if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
     delete _qualified_exports;
   }
   _qualified_exports = nullptr;
 }

+void PackageEntry::pack_qualified_exports() {
+  if (_qualified_exports != nullptr) {
+    _qualified_exports->shrink_to_fit();
+  }
+}
+
+void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push(&_name);
+  it->push(&_module);
+  it->push(&_qualified_exports);
+}
+
 PackageEntryTable::PackageEntryTable() { }

 PackageEntryTable::~PackageEntryTable() {
@@ -212,66 +226,19 @@ PackageEntryTable::~PackageEntryTable() {
 }

 #if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
-  const PackageEntry*,
-  PackageEntry*,
-  557, // prime number
-  AnyObj::C_HEAP> ArchivedPackageEntries;
-static ArchivedPackageEntries* _archived_packages_entries = nullptr;
-
 bool PackageEntry::should_be_archived() const {
   return module()->should_be_archived();
 }

-PackageEntry* PackageEntry::allocate_archived_entry() const {
-  precond(should_be_archived());
-  PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
-  memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
-
-  if (_archived_packages_entries == nullptr) {
-    _archived_packages_entries = new (mtClass)ArchivedPackageEntries();
-  }
-  assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
-  _archived_packages_entries->put(this, archived_entry);
-  return archived_entry;
-}
-
-PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
-  PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
-  if (ptr != nullptr) {
-    return *ptr;
-  } else {
-    return nullptr;
-  }
-}
-
-void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
-  closure->push(&_name);
-}
-
-void PackageEntry::init_as_archived_entry() {
-  Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
-  _name = ArchiveBuilder::get_buffered_symbol(_name);
-  _module = ModuleEntry::get_archived_entry(_module);
-  _qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
+void PackageEntry::remove_unshareable_info() {
+  if (_qualified_exports != nullptr) {
+    _qualified_exports->set_in_aot_cache();
+  }
   _defined_by_cds_in_class_path = 0;
   JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
-  ArchivePtrMarker::mark_pointer((address*)&_name);
-  ArchivePtrMarker::mark_pointer((address*)&_module);
-  ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
-
-  LogStreamHandle(Info, aot, package) st;
-  if (st.is_enabled()) {
-    st.print("archived ");
-    print(&st);
-  }
 }

 void PackageEntry::load_from_archive() {
-  _qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
   JFR_ONLY(INIT_ID(this);)
 }

@@ -280,14 +247,7 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
   return a->name()->fast_compare(b->name());
 }

-void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
-  auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
-    p->iterate_symbols(closure);
-  };
-  _table.iterate_all(syms);
-}
-
-Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
+Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
   // First count the packages in named modules
   int n = 0;
   auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
@@ -297,12 +257,19 @@
   };
   _table.iterate_all(count);

-  Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
+  Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
   // reset n
   n = 0;
   auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
     if (p->should_be_archived()) {
+      p->pack_qualified_exports();
       archived_packages->at_put(n++, p);
+      LogStreamHandle(Info, aot, package) st;
+      if (st.is_enabled()) {
+        st.print("archived ");
+        p->print(&st);
+      }
     }
   };
   _table.iterate_all(grab);
@@ -311,18 +278,8 @@
     // Always allocate in the same order to produce deterministic archive.
     QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
   }
-  for (int i = 0; i < n; i++) {
-    archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
-    ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
-  }
-  return archived_packages;
-}
-
-void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
-  for (int i = 0; i < archived_packages->length(); i++) {
-    PackageEntry* archived_entry = archived_packages->at(i);
-    archived_entry->init_as_archived_entry();
-  }
+  return archived_packages;
 }

 void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {
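
Both build_aot_table implementations sort by name before returning because hash-table iteration order varies from run to run, while a reproducible archive needs a stable layout. A minimal illustration of the same idea in portable C++ (the entries and field names are invented for the example):

```cpp
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <vector>

struct Entry { const char* name; };

int main() {
  // Pretend this order came from hash-table iteration (non-deterministic).
  std::vector<Entry> table = {{"java.xml"}, {"java.base"}, {"java.logging"}};

  // Sort by name so the archived layout is identical across dump runs.
  std::sort(table.begin(), table.end(), [](const Entry& a, const Entry& b) {
    return strcmp(a.name, b.name) < 0;
  });

  for (const Entry& e : table) printf("%s\n", e.name);
  return 0;
}
```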

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
 #ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
 #define SHARE_CLASSFILE_PACKAGEENTRY_HPP

+#include "cds/aotGrowableArray.hpp"
 #include "classfile/moduleEntry.hpp"
+#include "memory/metaspaceClosureType.hpp"
 #include "oops/symbol.hpp"
 #include "oops/symbolHandle.hpp"
 #include "runtime/atomicAccess.hpp"
@@ -114,7 +116,7 @@ private:
   bool _must_walk_exports;
   // Contains list of modules this package is qualifiedly exported to. Access
   // to this list is protected by the Module_lock.
-  GrowableArray<ModuleEntry*>* _qualified_exports;
+  AOTGrowableArray<ModuleEntry*>* _qualified_exports;
   JFR_ONLY(DEFINE_TRACE_ID_FIELD;)

   // Initial size of a package entry's list of qualified exports.
@@ -205,14 +207,24 @@ public:
   void purge_qualified_exports();
   void delete_qualified_exports();
+  void pack_qualified_exports(); // used by AOT
+
+  // methods required by MetaspaceClosure
+  void metaspace_pointers_do(MetaspaceClosure* it);
+  int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
+  MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
+  static bool is_read_only_by_default() { return false; }
+
   void print(outputStream* st = tty);
+  char* name_as_C_string() const {
+    assert(_name != nullptr, "name can't be null");
+    return name()->as_C_string();
+  }

 #if INCLUDE_CDS_JAVA_HEAP
   bool should_be_archived() const;
-  void iterate_symbols(MetaspaceClosure* closure);
-  PackageEntry* allocate_archived_entry() const;
-  void init_as_archived_entry();
-  static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
+  void remove_unshareable_info();
   void load_from_archive();
 #endif

@@ -271,9 +283,7 @@ public:
   void print(outputStream* st = tty);

 #if INCLUDE_CDS_JAVA_HEAP
-  void iterate_symbols(MetaspaceClosure* closure);
-  Array<PackageEntry*>* allocate_archived_entries();
-  void init_archived_entries(Array<PackageEntry*>* archived_packages);
+  Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
   void load_archived_entries(Array<PackageEntry*>* archived_packages);
 #endif
 };

View File

@@ -245,10 +245,6 @@ class SerializeClosure;
                                                                                 \
   /* Concurrency support */                                                     \
   template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
-  template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
-  template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
-  template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
-  template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
   template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
   template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
   template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@@ -302,6 +298,7 @@ class SerializeClosure;
   template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
   template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
   template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
+  template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
                                                                                 \
   template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
   template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \

View File

@ -29,7 +29,6 @@
#include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp" #include "runtime/globals_extension.hpp"
#include "runtime/java.hpp" #include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp" #include "utilities/bitMap.inline.hpp"
@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() { void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision)); STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) { for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = 0; _coarsen_from[i].store_relaxed(0);
_coarsen_collision[i] = 0; _coarsen_collision[i].store_relaxed(0);
} }
} }
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) { void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision)); STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) { for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = other._coarsen_from[i]; _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
_coarsen_collision[i] = other._coarsen_collision[i]; _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
} }
} }
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) { void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision)); STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) { for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i]; _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
_coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i]; _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
} }
} }
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) { void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag); assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed); _coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
if (collision) { if (collision) {
AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed); _coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
} }
} }
@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) " "Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) " "AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ", "BitMap->Full %zu (%zu) ",
_coarsen_from[0], _coarsen_collision[0], _coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
_coarsen_from[1], _coarsen_collision[1], _coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
// There is no BitMap at the first level so we can't . // There is no BitMap at the first level so we can't .
_coarsen_from[3], _coarsen_collision[3], _coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
_coarsen_from[4], _coarsen_collision[4], _coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
_coarsen_from[5], _coarsen_collision[5], _coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
_coarsen_from[6], _coarsen_collision[6] _coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
); );
} }
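Taken together, these hunks replace free-function `AtomicAccess` calls on plain fields with an `Atomic<T>` member type carrying explicit `load_relaxed`/`store_relaxed`/`add_then_fetch` operations. The HotSpot `Atomic<T>` class itself is not part of this diff; the stand-alone sketch below approximates its interface with `std::atomic` so the renamed operations can be read against standard C++ memory orders. The implementation details here are assumptions, not the real class.

```
#include <atomic>
#include <cstddef>

// Minimal stand-in for the Atomic<T> wrapper used in the patch, mapped onto
// std::atomic. Arithmetic members assume an integral T.
template <typename T>
class Atomic {
  std::atomic<T> _value;
public:
  Atomic() : _value(T()) {}
  explicit Atomic(T v) : _value(v) {}
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
  T load_acquire() const { return _value.load(std::memory_order_acquire); }
  void release_store(T v) { _value.store(v, std::memory_order_release); }
  // Returns the new value, matching the add_then_fetch naming in the diff.
  T add_then_fetch(T inc, std::memory_order order = std::memory_order_seq_cst) {
    return _value.fetch_add(inc, order) + inc;
  }
  T sub_then_fetch(T dec) { return _value.fetch_sub(dec) - dec; }
  // Returns the previously observed value, like AtomicAccess::cmpxchg did.
  T compare_exchange(T expected, T desired,
                     std::memory_order order = std::memory_order_seq_cst) {
    _value.compare_exchange_strong(expected, desired, order,
                                   std::memory_order_relaxed);
    return expected; // updated to the observed value when the CAS fails
  }
};

// Relaxed statistics counter, as in record_coarsening(): exact ordering is
// not needed for correctness, only eventual visibility for logging.
static Atomic<size_t> coarsen_events;

void record_event() {
  coarsen_events.add_then_fetch(1, std::memory_order_relaxed);
}
```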
@ -248,7 +247,7 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
// the per region cardsets. // the per region cardsets.
const static uint GroupBucketClaimSize = 4; const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table? // Did we insert at least one card in the table?
bool volatile _inserted_card; Atomic<bool> _inserted_card;
G1CardSetMemoryManager* _mm; G1CardSetMemoryManager* _mm;
CardSetHash _table; CardSetHash _table;
@ -311,10 +310,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr()); G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow); bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
if (!_inserted_card && inserted) { if (!_inserted_card.load_relaxed() && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store // It does not matter to us who is setting the flag so a regular atomic store
// is sufficient. // is sufficient.
AtomicAccess::store(&_inserted_card, true); _inserted_card.store_relaxed(true);
} }
return found.value(); return found.value();
@ -343,9 +342,9 @@ public:
} }
void reset() { void reset() {
if (AtomicAccess::load(&_inserted_card)) { if (_inserted_card.load_relaxed()) {
_table.unsafe_reset(InitialLogTableSize); _table.unsafe_reset(InitialLogTableSize);
AtomicAccess::store(&_inserted_card, false); _inserted_card.store_relaxed(false);
} }
} }
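The `_inserted_card` handling above is a small but deliberate idiom: test the flag with a relaxed load before writing it, so the common already-set case never dirties the cache line, and `reset()` only pays for `unsafe_reset` when at least one card ever went in. A minimal sketch of the same check-then-store pattern (names are illustrative, not from the patch):

```
#include <atomic>

// Check-then-store idiom used for _inserted_card above: reading before
// writing keeps the hot path read-only once the flag is set.
class InsertTracker {
  std::atomic<bool> _inserted{false};
public:
  void note_insert(bool inserted) {
    // It does not matter which thread sets the flag, so relaxed suffices.
    if (inserted && !_inserted.load(std::memory_order_relaxed)) {
      _inserted.store(true, std::memory_order_relaxed);
    }
  }
  template <typename ResetFn>
  void reset_if_used(ResetFn do_reset) {
    if (_inserted.load(std::memory_order_relaxed)) {
      do_reset(); // e.g. the table's unsafe_reset()
      _inserted.store(false, std::memory_order_relaxed);
    }
  }
};
```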
@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value); _mm->free(container_type_to_mem_object_type(type), value);
} }
G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) { G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
// Update reference counts under RCU critical section to avoid a // Update reference counts under RCU critical section to avoid a
// use-after-cleanup bug where we increment a reference count for // use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused. // an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current()); GlobalCounter::CriticalSection cs(Thread::current());
while (true) { while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse. // Get ContainerPtr and increment refcount atomically wrt to memory reuse.
ContainerPtr container = AtomicAccess::load_acquire(container_addr); ContainerPtr container = container_addr->load_acquire();
uint cs_type = container_type(container); uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) { if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container; return container;
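The interesting part of `acquire_container` is the combination above: the refcount increment must happen inside a `GlobalCounter::CriticalSection` so that memory reclamation, which waits for all critical sections to drain, cannot free and reuse the container between the acquire-load and the increment. A schematic version, with `CriticalSection` and `try_increment_refcount` as assumed stand-ins for the HotSpot types:

```
#include <atomic>

// Schematic acquire loop. CriticalSection stands in for HotSpot's
// GlobalCounter::CriticalSection: while any such guard is live, reclaimed
// containers cannot be reused, so the refcount raced on below still belongs
// to the object that was loaded.
template <typename CriticalSection, typename Container>
Container* acquire(std::atomic<Container*>& slot) {
  CriticalSection cs; // reader-side guard for the whole load+increment window
  while (true) {
    Container* c = slot.load(std::memory_order_acquire);
    if (c == nullptr) {
      return nullptr;
    }
    if (c->try_increment_refcount()) {
      return c; // caller now holds a reference and must release it
    }
    // The container is being freed concurrently and the slot is being
    // replaced, so reload and retry with the new value.
  }
}
```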
@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set; G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr; using ContainerPtr = G1CardSet::ContainerPtr;
void coarsen_to_full(ContainerPtr* container_addr) { void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
while (true) { while (true) {
ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr); ContainerPtr cur_container = container_addr->load_acquire();
uint cs_type = G1CardSet::container_type(cur_container); uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) { if (cur_container == G1CardSet::FullCardSet) {
return; return;
} }
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet); ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) { if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container); _card_set->release_and_maybe_free_container(cur_container);
@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
public: public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { } explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
void operator ()(ContainerPtr* container_addr) { void operator ()(Atomic<ContainerPtr>* container_addr) {
coarsen_to_full(container_addr); coarsen_to_full(container_addr);
} }
}; };
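The `coarsen_to_full` loop above is the general sentinel-installation pattern: load, bail out if the sentinel is already present, otherwise CAS it in; exactly one thread wins the exchange and therefore owns releasing the displaced container. A generic sketch, where the `Release` functor stands in for `release_and_maybe_free_container`:

```
#include <atomic>

// Generic form of coarsen_to_full(): install a sentinel value in a shared
// slot; the single thread whose CAS succeeds cleans up the displaced value.
template <typename T, typename Release>
void install_sentinel(std::atomic<T>& slot, T sentinel, Release release_old) {
  while (true) {
    T cur = slot.load(std::memory_order_acquire);
    if (cur == sentinel) {
      return; // another thread already coarsened this slot
    }
    if (slot.compare_exchange_strong(cur, sentinel)) {
      release_old(cur); // exactly one winner releases the old container
      return;
    }
    // CAS lost a race; loop reloads and re-checks.
  }
}
```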
@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container; ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region); uint bucket = _config->howl_bucket_index(card_in_region);
ContainerPtr volatile* bucket_entry = howl->container_addr(bucket); Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
while (true) { while (true) {
if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) { if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
return Overflow; return Overflow;
} }
@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
} }
if (increment_total && add_result == Added) { if (increment_total && add_result == Added) {
AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed); howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
} }
if (to_transfer != nullptr) { if (to_transfer != nullptr) {
@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap()); return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
} }
G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) { G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container); G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr()); return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
} }
@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container; return new_container;
} }
bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr, bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container, ContainerPtr cur_container,
uint card_in_region, uint card_in_region,
bool within_howl) { bool within_howl) {
@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere(); ShouldNotReachHere();
} }
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order? ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
if (old_value == cur_container) { if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred // Success. Indicate that the cards from the current card set must be transferred
// by this caller. // by this caller.
@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be"); assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the // Need to correct for that the Full remembered set occupies more cards than the
// AoCS before. // AoCS before.
AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed); _num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
} }
} }
@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1; diff -= 1;
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container); G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed); howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region); G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards"); assert(table_entry != nullptr, "Table entry not found for transferred cards");
AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed); table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed); _num_occupied.add_then_fetch(diff, memory_order_relaxed);
} }
} }
G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr, G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr container, ContainerPtr container,
uint card_region, uint card_region,
uint card_in_region, uint card_in_region,
@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
} }
if (increment_total && add_result == Added) { if (increment_total && add_result == Added) {
AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed); table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&_num_occupied, memory_order_relaxed); _num_occupied.add_then_fetch(1u, memory_order_relaxed);
} }
if (should_grow_table) { if (should_grow_table) {
_table->grow(); _table->grow();
@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false; return false;
} }
ContainerPtr container = table_entry->_container; ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) { if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that // contains_card() is not a performance critical method so we do not hide that
// case in the switch below. // case in the switch below.
@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return; return;
} }
ContainerPtr container = table_entry->_container; ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) { if (container == FullCardSet) {
st->print("FULL card set)"); st->print("FULL card set)");
return; return;
@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) { void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value = auto do_value =
[&] (G1CardSetHashTableValue* value) { [&] (G1CardSetHashTableValue* value) {
cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container); cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
return true; return true;
}; };
@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
} }
bool G1CardSet::is_empty() const { bool G1CardSet::is_empty() const {
return _num_occupied == 0; return _num_occupied.load_relaxed() == 0;
} }
size_t G1CardSet::occupied() const { size_t G1CardSet::occupied() const {
return _num_occupied; return _num_occupied.load_relaxed();
} }
size_t G1CardSet::num_containers() { size_t G1CardSet::num_containers() {
@ -1051,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() { void G1CardSet::clear() {
_table->reset(); _table->reset();
_num_occupied = 0; _num_occupied.store_relaxed(0);
_mm->flush(); _mm->flush();
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "memory/memRegion.hpp" #include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp" #include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions; class G1CardSetAllocOptions;
@ -154,8 +155,8 @@ public:
private: private:
// Indices are "from" indices. // Indices are "from" indices.
size_t _coarsen_from[NumCoarsenCategories]; Atomic<size_t> _coarsen_from[NumCoarsenCategories];
size_t _coarsen_collision[NumCoarsenCategories]; Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
public: public:
G1CardSetCoarsenStats() { reset(); } G1CardSetCoarsenStats() { reset(); }
@ -271,11 +272,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may // Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality. // be (slightly) more cards in the card set than this value in reality.
size_t _num_occupied; Atomic<size_t> _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type); ContainerPtr make_container_ptr(void* value, uintptr_t type);
ContainerPtr acquire_container(ContainerPtr volatile* container_addr); ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
// Returns true if the card set container should be released // Returns true if the card set container should be released
bool release_container(ContainerPtr container); bool release_container(ContainerPtr container);
// Release card set and free if needed. // Release card set and free if needed.
@ -288,7 +289,7 @@ private:
// coarsen_container does not transfer cards from cur_container // coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards. // to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card). // Returns true if this was the thread that coarsened the container (and added the card).
bool coarsen_container(ContainerPtr volatile* container_addr, bool coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container, ContainerPtr cur_container,
uint card_in_region, bool within_howl = false); uint card_in_region, bool within_howl = false);
@ -300,9 +301,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region); void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region); void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true); G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region); G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region); G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region); G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true); G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@ -366,7 +367,6 @@ public:
size_t num_containers(); size_t num_containers();
static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out); static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes. // Returns size of the actual remembered set containers in bytes.
@ -412,8 +412,15 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr; using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx; const uint _region_idx;
uint volatile _num_occupied; Atomic<uint> _num_occupied;
ContainerPtr volatile _container; Atomic<ContainerPtr> _container;
// Copy constructor needed for use in ConcurrentHashTable.
G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
_region_idx(other._region_idx),
_num_occupied(other._num_occupied.load_relaxed()),
_container(other._container.load_relaxed())
{ }
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { } G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
}; };
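The new copy constructor is forced by the type change: atomic wrapper types delete copying, so a `ConcurrentHashTable` value that used to be trivially copyable now needs an explicit member-wise snapshot. The same constraint reproduced with standard atomics:

```
#include <atomic>

struct TableValue {
  const unsigned region_idx;
  std::atomic<unsigned> num_occupied;
  std::atomic<void*>    container;

  TableValue(unsigned idx, void* c)
    : region_idx(idx), num_occupied(0), container(c) {}

  // std::atomic deletes its copy constructor, so a hash table that copies
  // nodes needs this explicit snapshot copy. Relaxed loads are fine on the
  // assumption that the entry is not concurrently updated while copied.
  TableValue(const TableValue& other)
    : region_idx(other.region_idx),
      num_occupied(other.num_occupied.load(std::memory_order_relaxed)),
      container(other.container.load(std::memory_order_relaxed)) {}
};
```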


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp" #include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp" #include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp" #include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr; using ContainerPtr = G1CardSet::ContainerPtr;
ContainerPtr volatile * _value_addr; Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value; ContainerPtr _value;
static const uint SizeFieldLen = 3; static const uint SizeFieldLen = 3;
@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) : explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {} G1CardSetInlinePtr(nullptr, value) {}
G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) { G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value)); assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
} }
@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored // All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node. // directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer { class G1CardSetContainer {
uintptr_t _ref_count; Atomic<uintptr_t> _ref_count;
protected: protected:
~G1CardSetContainer() = default; ~G1CardSetContainer() = default;
public: public:
G1CardSetContainer() : _ref_count(3) { } G1CardSetContainer() : _ref_count(3) { }
uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); } uintptr_t refcount() const { return _ref_count.load_acquire(); }
bool try_increment_refcount(); bool try_increment_refcount();
@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr; using ContainerPtr = G1CardSet::ContainerPtr;
private: private:
EntryCountType _size; EntryCountType _size;
EntryCountType volatile _num_entries; Atomic<EntryCountType> _num_entries;
// VLA implementation. // VLA implementation.
EntryDataType _data[1]; EntryDataType _data[1];
@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1; static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj { class G1CardSetArrayLocker : public StackObj {
EntryCountType volatile* _num_entries_addr; Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries; EntryCountType _local_num_entries;
public: public:
G1CardSetArrayLocker(EntryCountType volatile* value); G1CardSetArrayLocker(Atomic<EntryCountType>* value);
EntryCountType num_entries() const { return _local_num_entries; } EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() { void inc_num_entries() {
@ -192,7 +192,7 @@ private:
} }
~G1CardSetArrayLocker() { ~G1CardSetArrayLocker() {
AtomicAccess::release_store(_num_entries_addr, _local_num_entries); _num_entries_addr->release_store(_local_num_entries);
} }
}; };
@ -213,7 +213,7 @@ public:
template <class CardVisitor> template <class CardVisitor>
void iterate(CardVisitor& found); void iterate(CardVisitor& found);
size_t num_entries() const { return _num_entries & EntryMask; } size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
static size_t header_size_in_bytes(); static size_t header_size_in_bytes();
@ -223,7 +223,7 @@ public:
}; };
class G1CardSetBitMap : public G1CardSetContainer { class G1CardSetBitMap : public G1CardSetContainer {
size_t _num_bits_set; Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1]; BitMap::bm_word_t _bits[1];
public: public:
@ -236,7 +236,7 @@ public:
return bm.at(card_idx); return bm.at(card_idx);
} }
uint num_bits_set() const { return (uint)_num_bits_set; } uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
template <class CardVisitor> template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset); void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public: public:
typedef uint EntryCountType; typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr; using ContainerPtr = G1CardSet::ContainerPtr;
EntryCountType volatile _num_entries; Atomic<EntryCountType> _num_entries;
private: private:
// VLA implementation. // VLA implementation.
ContainerPtr _buckets[1]; Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point. // Do not add class member variables beyond this point.
// Iterates over the ContainerPtr at the given index in this Howl card set, // Iterates over the ContainerPtr at the given index in this Howl card set,
@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const; ContainerPtr at(EntryCountType index) const;
ContainerPtr const* buckets() const; Atomic<ContainerPtr> const* buckets() const;
public: public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config); G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
ContainerPtr const* container_addr(EntryCountType index) const; Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
ContainerPtr* container_addr(EntryCountType index); Atomic<ContainerPtr>* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config); bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor // Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow; return Overflow;
} }
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card); ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed); ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) { if (_value == old_value) {
return Added; return Added;
} }
@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
} }
uintptr_t new_value = old_value + 2; uintptr_t new_value = old_value + 2;
uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value); uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) { if (ref_count == old_value) {
return true; return true;
} }
@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() { inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount(); uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition"); assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
return AtomicAccess::sub(&_ref_count, 2u); return _ref_count.sub_then_fetch(2u);
} }
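`try_increment_refcount`/`decrement_refcount` rely on a tagged counter: judging by the `(old_value & 0x1) != 0 && old_value >= 3` assertion, the low bit marks the container as live and references are counted in steps of two above it. A sketch under that reading:

```
#include <atomic>
#include <cstdint>

// Refcount with the low bit reserved as an "alive" tag: value 3 means alive
// with one reference, and counts move in steps of 2 so the tag bit is never
// disturbed. Interpretation of the bit layout is an assumption from the
// assertions above.
class TaggedRefcount {
  std::atomic<uintptr_t> _rc{3};
public:
  bool try_increment() {
    uintptr_t old_value = _rc.load(std::memory_order_acquire);
    while (true) {
      if ((old_value & 1) == 0) {
        return false; // tag cleared: object is being reclaimed
      }
      if (_rc.compare_exchange_weak(old_value, old_value + 2)) {
        return true;
      }
      // old_value was refreshed by the failed CAS; re-check the tag.
    }
  }
  uintptr_t decrement() {
    return _rc.fetch_sub(2) - 2; // new value, like sub_then_fetch above
  }
};
```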
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) : inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region); *entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
} }
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) : inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) { _num_entries_addr(num_entries_addr) {
SpinYield s; SpinYield s;
EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask; EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
while (true) { while (true) {
EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr, EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
num_entries, (EntryCountType)(num_entries | LockBitMask));
(EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) { if (old_value == num_entries) {
// Succeeded locking the array. // Succeeded locking the array.
_local_num_entries = num_entries; _local_num_entries = num_entries;
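`G1CardSetArrayLocker` is a scoped lock folded into the entry counter itself: lock by CAS-ing the unlocked count to `count | LockBitMask` (spinning otherwise), mutate a local copy, and let the destructor release-store the new count, which simultaneously publishes the added entries and clears the lock bit. A compact equivalent with `std::atomic`; the bit layout here is illustrative, the real code derives `LockBitMask` and `EntryMask` differently:

```
#include <atomic>
#include <cstdint>
#include <thread>

// Counter with an embedded lock bit, as in G1CardSetArrayLocker.
class CountLock {
  static constexpr uint32_t LockBit = 1u << 31;
  std::atomic<uint32_t>& _n;
  uint32_t _local;
public:
  explicit CountLock(std::atomic<uint32_t>& n) : _n(n) {
    uint32_t count = _n.load(std::memory_order_relaxed) & ~LockBit;
    // CAS only ever succeeds from an unlocked value, so holders of the
    // lock bit exclude us until they store back a clean count.
    while (!_n.compare_exchange_weak(count, count | LockBit)) {
      count &= ~LockBit;         // strip a lock bit observed on failure
      std::this_thread::yield(); // stand-in for HotSpot's SpinYield
    }
    _local = count;
  }
  uint32_t count() const { return _local; }
  void inc() { _local++; } // private update while holding the lock
  ~CountLock() {
    // Publishes entries added under the lock and clears the lock bit.
    _n.store(_local, std::memory_order_release);
  }
};
```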
@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
} }
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const { inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition"); assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index; return base_addr() + index;
} }
@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) { inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)), assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx); "Card index %u does not fit allowed card value range.", card_idx);
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0; EntryCountType idx = 0;
for (; idx < num_entries; idx++) { for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) { if (at(idx) == card_idx) {
@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
} }
inline bool G1CardSetArray::contains(uint card_idx) { inline bool G1CardSetArray::contains(uint card_idx) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) { for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) { if (at(idx) == card_idx) {
@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor> template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) { void G1CardSetArray::iterate(CardVisitor& found) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) { for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx)); found(at(idx));
} }
@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) { inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits); BitMapView bm(_bits, size_in_bits);
if (_num_bits_set >= threshold) { if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow; return bm.at(card_idx) ? Found : Overflow;
} }
if (bm.par_set_bit(card_idx)) { if (bm.par_set_bit(card_idx)) {
AtomicAccess::inc(&_num_bits_set, memory_order_relaxed); _num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added; return Added;
} }
return Found; return Found;
@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits); return offset_of(G1CardSetBitMap, _bits);
} }
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const { inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition"); assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index; return buckets() + index;
} }
inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) { inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index)); return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
} }
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const { inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
return *container_addr(index); return (*container_addr(index)).load_relaxed();
} }
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const { inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes(); const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
return reinterpret_cast<ContainerPtr const*>(ptr); return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
} }
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) : inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl(); EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region); EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) { for (uint i = 0; i < num_buckets; ++i) {
*container_addr(i) = G1CardSetInlinePtr(); container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) { if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i)); G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr()); value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) { inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx); EntryCountType bucket = config->howl_bucket_index(card_idx);
ContainerPtr* array_entry = container_addr(bucket); Atomic<ContainerPtr>* array_entry = container_addr(bucket);
ContainerPtr container = AtomicAccess::load_acquire(array_entry); ContainerPtr container = array_entry->load_acquire();
switch (G1CardSet::container_type(container)) { switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: { case G1CardSet::ContainerArrayOfCards: {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp" #include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp" #include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp" #include "gc/g1/g1MonotonicArena.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp" #include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name, G1CardSetAllocator::G1CardSetAllocator(const char* name,


@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp" #include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp" #include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp" #include "gc/g1/g1HeapRegionRemSet.hpp"


@ -203,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer; G1HeapRegionClaimer _hrclaimer;
uint volatile _num_regions_added; Atomic<uint> _num_regions_added;
G1BuildCandidateArray _result; G1BuildCandidateArray _result;
void update_totals(uint num_regions) { void update_totals(uint num_regions) {
if (num_regions > 0) { if (num_regions > 0) {
AtomicAccess::add(&_num_regions_added, num_regions); _num_regions_added.add_then_fetch(num_regions);
} }
} }
@ -221,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) { void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy(); G1Policy* p = G1CollectedHeap::heap()->policy();
uint num_candidates = AtomicAccess::load(&_num_regions_added); uint num_candidates = _num_regions_added.load_relaxed();
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates); uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0; uint num_pruned = 0;
@ -254,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes, wasted_bytes,
allowed_waste); allowed_waste);
AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed); _num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
} }
public: public:
@ -275,7 +275,7 @@ public:
_result.sort_by_gc_efficiency(); _result.sort_by_gc_efficiency();
prune(_result.array()); prune(_result.array());
candidates->set_candidates_from_marking(_result.array(), candidates->set_candidates_from_marking(_result.array(),
_num_regions_added); _num_regions_added.load_relaxed());
} }
}; };


@ -291,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand(); _chunk_allocator.try_expand();
} }
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) { void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
elem->next = *list; elem->next = list->load_relaxed();
*list = elem; list->store_relaxed(elem);
} }
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) { void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@ -307,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem); add_chunk_to_list(&_free_list, elem);
} }
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) { G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
TaskQueueEntryChunk* result = *list; TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) { if (result != nullptr) {
*list = (*list)->next; list->store_relaxed(list->load_relaxed()->next);
} }
return result; return result;
} }
@ -364,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() { void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0; _chunks_in_chunk_list = 0;
_chunk_list = nullptr; _chunk_list.store_relaxed(nullptr);
_free_list = nullptr; _free_list.store_relaxed(nullptr);
_chunk_allocator.reset(); _chunk_allocator.reset();
} }


@ -210,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator; ChunkAllocator _chunk_allocator;
char _pad0[DEFAULT_PADDING_SIZE]; char _pad0[DEFAULT_PADDING_SIZE];
TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users. Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)]; char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data. Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list; volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)]; char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// Atomically add the given chunk to the list. // Atomically add the given chunk to the list.
void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem); void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the // Atomically remove and return a chunk from the given list. Returns null if the
// list is empty. // list is empty.
TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list); TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem); void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem); void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@ -252,7 +252,7 @@ private:
// Return whether the chunk list is empty. Racy due to unsynchronized access to // Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list. // _chunk_list.
bool is_empty() const { return _chunk_list == nullptr; } bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
size_t capacity() const { return _chunk_allocator.capacity(); } size_t capacity() const { return _chunk_allocator.capacity(); }


@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
size_t num_chunks = 0; size_t num_chunks = 0;
TaskQueueEntryChunk* cur = _chunk_list; TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
while (cur != nullptr) { while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks); guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1HeapRegionPrinter.hpp" #include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp" #include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp" #include "runtime/mutexLocker.hpp"
struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure { struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list); G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id); _g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild); _total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);
// Update the old/humongous region sets // Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed, _g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/g1/g1HeapRegionManager.hpp" #include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp" #include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/shared/workerThread.hpp" #include "gc/shared/workerThread.hpp"
#include "runtime/atomic.hpp"
class G1CollectedHeap; class G1CollectedHeap;
class G1ConcurrentMark; class G1ConcurrentMark;
@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1ConcurrentMark* _cm; G1ConcurrentMark* _cm;
G1HeapRegionClaimer _hrclaimer; G1HeapRegionClaimer _hrclaimer;
uint volatile _total_selected_for_rebuild; Atomic<uint> _total_selected_for_rebuild;
// Reclaimed empty regions // Reclaimed empty regions
G1FreeRegionList _cleanup_list; G1FreeRegionList _cleanup_list;
@ -57,7 +58,9 @@ public:
void work(uint worker_id) override; void work(uint worker_id) override;
uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; } uint total_selected_for_rebuild() const {
return _total_selected_for_rebuild.load_relaxed();
}
static uint desired_num_workers(uint num_regions); static uint desired_num_workers(uint num_regions);
}; };


@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp" #include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp" #include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp" #include "gc/g1/g1HeapRegion.inline.hpp"


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,7 @@
* *
*/ */
#include "gc/g1/g1ConcurrentRefineStats.hpp" #include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/atomicAccess.hpp" #include "runtime/atomicAccess.hpp"
#include "runtime/timer.hpp" #include "runtime/timer.hpp"
@ -39,19 +39,27 @@ G1ConcurrentRefineStats::G1ConcurrentRefineStats() :
{} {}
void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) { void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
AtomicAccess::add(&_sweep_duration, other->_sweep_duration, memory_order_relaxed); _sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
AtomicAccess::add(&_yield_during_sweep_duration, other->_yield_during_sweep_duration, memory_order_relaxed); _yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);
AtomicAccess::add(&_cards_scanned, other->_cards_scanned, memory_order_relaxed); _cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
AtomicAccess::add(&_cards_clean, other->_cards_clean, memory_order_relaxed); _cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
AtomicAccess::add(&_cards_not_parsable, other->_cards_not_parsable, memory_order_relaxed); _cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
AtomicAccess::add(&_cards_already_refer_to_cset, other->_cards_already_refer_to_cset, memory_order_relaxed); _cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
AtomicAccess::add(&_cards_refer_to_cset, other->_cards_refer_to_cset, memory_order_relaxed); _cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
AtomicAccess::add(&_cards_no_cross_region, other->_cards_no_cross_region, memory_order_relaxed); _cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);
AtomicAccess::add(&_refine_duration, other->_refine_duration, memory_order_relaxed); _refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
} }
void G1ConcurrentRefineStats::reset() { void G1ConcurrentRefineStats::reset() {
*this = G1ConcurrentRefineStats(); _sweep_duration.store_relaxed(0);
_yield_during_sweep_duration.store_relaxed(0);
_cards_scanned.store_relaxed(0);
_cards_clean.store_relaxed(0);
_cards_not_parsable.store_relaxed(0);
_cards_already_refer_to_cset.store_relaxed(0);
_cards_refer_to_cset.store_relaxed(0);
_cards_no_cross_region.store_relaxed(0);
_refine_duration.store_relaxed(0);
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,61 +26,61 @@
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP #define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
// Collection of statistics for concurrent refinement processing. // Collection of statistics for concurrent refinement processing.
// Used for collecting per-thread statistics and for summaries over a // Used for collecting per-thread statistics and for summaries over a
// collection of threads. // collection of threads.
class G1ConcurrentRefineStats : public CHeapObj<mtGC> { class G1ConcurrentRefineStats : public CHeapObj<mtGC> {
jlong _sweep_duration; // Time spent sweeping the table finding non-clean cards Atomic<jlong> _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them. // and refining them.
jlong _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep). Atomic<jlong> _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
size_t _cards_scanned; // Total number of cards scanned. Atomic<size_t> _cards_scanned; // Total number of cards scanned.
size_t _cards_clean; // Number of cards found clean. Atomic<size_t> _cards_clean; // Number of cards found clean.
size_t _cards_not_parsable; // Number of cards we could not parse and left unrefined. Atomic<size_t> _cards_not_parsable; // Number of cards we could not parse and left unrefined.
size_t _cards_already_refer_to_cset; // Number of cards found to be already young. Atomic<size_t> _cards_already_refer_to_cset; // Number of cards found to be already young.
size_t _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference. Atomic<size_t> _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
size_t _cards_no_cross_region; // Number of cards that were dirtied, but then cleaned again by the mutator. Atomic<size_t> _cards_no_cross_region; // Number of cards that were dirtied, but then cleaned again by the mutator.
jlong _refine_duration; // Time spent during actual refinement. Atomic<jlong> _refine_duration; // Time spent during actual refinement.
public: public:
G1ConcurrentRefineStats(); G1ConcurrentRefineStats();
// Time spent sweeping the refinement table (includes actual refinement, // Time spent sweeping the refinement table (includes actual refinement,
// but not yield time). // but not yield time).
jlong sweep_duration() const { return _sweep_duration - _yield_during_sweep_duration; } inline jlong sweep_duration() const;
jlong yield_during_sweep_duration() const { return _yield_during_sweep_duration; } inline jlong yield_during_sweep_duration() const;
jlong refine_duration() const { return _refine_duration; } inline jlong refine_duration() const;
// Number of refined cards. // Number of refined cards.
size_t refined_cards() const { return cards_not_clean(); } inline size_t refined_cards() const;
size_t cards_scanned() const { return _cards_scanned; } inline size_t cards_scanned() const;
size_t cards_clean() const { return _cards_clean; } inline size_t cards_clean() const;
size_t cards_not_clean() const { return _cards_scanned - _cards_clean; } inline size_t cards_not_clean() const;
size_t cards_not_parsable() const { return _cards_not_parsable; } inline size_t cards_not_parsable() const;
size_t cards_already_refer_to_cset() const { return _cards_already_refer_to_cset; } inline size_t cards_already_refer_to_cset() const;
size_t cards_refer_to_cset() const { return _cards_refer_to_cset; } inline size_t cards_refer_to_cset() const;
size_t cards_no_cross_region() const { return _cards_no_cross_region; } inline size_t cards_no_cross_region() const;
// Number of cards that were marked dirty and in need of refinement. This includes cards recently // Number of cards that were marked dirty and in need of refinement. This includes cards recently
// found to refer to the collection set as they originally were dirty. // found to refer to the collection set as they originally were dirty.
size_t cards_pending() const { return cards_not_clean() - _cards_already_refer_to_cset; } inline size_t cards_pending() const;
size_t cards_to_cset() const { return _cards_already_refer_to_cset + _cards_refer_to_cset; } inline size_t cards_to_cset() const;
void inc_sweep_time(jlong t) { _sweep_duration += t; } inline void inc_sweep_time(jlong t);
void inc_yield_during_sweep_duration(jlong t) { _yield_during_sweep_duration += t; } inline void inc_yield_during_sweep_duration(jlong t);
void inc_refine_duration(jlong t) { _refine_duration += t; } inline void inc_refine_duration(jlong t);
void inc_cards_scanned(size_t increment) { _cards_scanned += increment; } inline void inc_cards_scanned(size_t increment);
void inc_cards_clean(size_t increment) { _cards_clean += increment; } inline void inc_cards_clean(size_t increment);
void inc_cards_not_parsable() { _cards_not_parsable++; } inline void inc_cards_not_parsable();
void inc_cards_already_refer_to_cset() { _cards_already_refer_to_cset++; } inline void inc_cards_already_refer_to_cset();
void inc_cards_refer_to_cset() { _cards_refer_to_cset++; } inline void inc_cards_refer_to_cset();
void inc_cards_no_cross_region() { _cards_no_cross_region++; } inline void inc_cards_no_cross_region();
void add_atomic(G1ConcurrentRefineStats* other); void add_atomic(G1ConcurrentRefineStats* other);


@ -0,0 +1,118 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
#include "gc/g1/g1ConcurrentRefineStats.hpp"
inline jlong G1ConcurrentRefineStats::sweep_duration() const {
return _sweep_duration.load_relaxed() - yield_during_sweep_duration();
}
inline jlong G1ConcurrentRefineStats::yield_during_sweep_duration() const {
return _yield_during_sweep_duration.load_relaxed();
}
inline jlong G1ConcurrentRefineStats::refine_duration() const {
return _refine_duration.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::refined_cards() const {
return cards_not_clean();
}
inline size_t G1ConcurrentRefineStats::cards_scanned() const {
return _cards_scanned.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_clean() const {
return _cards_clean.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_not_clean() const {
return cards_scanned() - cards_clean();
}
inline size_t G1ConcurrentRefineStats::cards_not_parsable() const {
return _cards_not_parsable.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_already_refer_to_cset() const {
return _cards_already_refer_to_cset.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_refer_to_cset() const {
return _cards_refer_to_cset.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_no_cross_region() const {
return _cards_no_cross_region.load_relaxed();
}
inline size_t G1ConcurrentRefineStats::cards_pending() const {
return cards_not_clean() - cards_already_refer_to_cset();
}
inline size_t G1ConcurrentRefineStats::cards_to_cset() const {
return cards_already_refer_to_cset() + cards_refer_to_cset();
}
inline void G1ConcurrentRefineStats::inc_sweep_time(jlong t) {
_sweep_duration.store_relaxed(_sweep_duration.load_relaxed() + t);
}
inline void G1ConcurrentRefineStats::inc_yield_during_sweep_duration(jlong t) {
_yield_during_sweep_duration.store_relaxed(yield_during_sweep_duration() + t);
}
inline void G1ConcurrentRefineStats::inc_refine_duration(jlong t) {
_refine_duration.store_relaxed(refine_duration() + t);
}
inline void G1ConcurrentRefineStats::inc_cards_scanned(size_t increment) {
_cards_scanned.store_relaxed(cards_scanned() + increment);
}
inline void G1ConcurrentRefineStats::inc_cards_clean(size_t increment) {
_cards_clean.store_relaxed(cards_clean() + increment);
}
inline void G1ConcurrentRefineStats::inc_cards_not_parsable() {
_cards_not_parsable.store_relaxed(cards_not_parsable() + 1);
}
inline void G1ConcurrentRefineStats::inc_cards_already_refer_to_cset() {
_cards_already_refer_to_cset.store_relaxed(cards_already_refer_to_cset() + 1);
}
inline void G1ConcurrentRefineStats::inc_cards_refer_to_cset() {
_cards_refer_to_cset.store_relaxed(cards_refer_to_cset() + 1);
}
inline void G1ConcurrentRefineStats::inc_cards_no_cross_region() {
_cards_no_cross_region.store_relaxed(cards_no_cross_region() + 1);
}
#endif // SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
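All of the accessors above compile down to relaxed operations on `Atomic<...>` fields. A minimal standalone sketch of the same shape, using `std::atomic` in place of HotSpot's `Atomic<>` wrapper (and assuming, as the plain load-then-store in the `inc_*` functions implies, that each counter has a single writer at a time):

```
// Illustrative sketch only, not HotSpot code; names are made up.
#include <atomic>
#include <cstddef>

struct RefineStatsSketch {
  std::atomic<size_t> _cards_scanned{0};
  std::atomic<size_t> _cards_clean{0};

  // Mirrors inc_cards_scanned(): a separate load + store rather than
  // fetch_add, which is only safe if one thread updates this counter.
  void inc_cards_scanned(size_t n) {
    _cards_scanned.store(_cards_scanned.load(std::memory_order_relaxed) + n,
                         std::memory_order_relaxed);
  }

  // Mirrors cards_not_clean(): derived values combine relaxed loads, so
  // they are only exact once the refinement workers have quiesced.
  size_t cards_not_clean() const {
    return _cards_scanned.load(std::memory_order_relaxed) -
           _cards_clean.load(std::memory_order_relaxed);
  }
};
```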
@@ -24,6 +24,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"

class G1RefineRegionClosure : public G1HeapRegionClosure {
@@ -25,10 +25,10 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP

-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/workerThread.hpp"

class G1CardTableClaimTable;
+class G1ConcurrentRefineStats;

class G1ConcurrentRefineSweepTask : public WorkerTask {
  G1CardTableClaimTable* _scan_state;
@@ -26,7 +26,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP

-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,13 +22,24 @@
 *
 */

-#include "gc/g1/g1EvacStats.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/globals.hpp"

+void G1EvacStats::reset() {
+  PLABStats::reset();
+  _region_end_waste.store_relaxed(0);
+  _regions_filled.store_relaxed(0);
+  _num_plab_filled.store_relaxed(0);
+  _direct_allocated.store_relaxed(0);
+  _num_direct_allocated.store_relaxed(0);
+  _failure_used.store_relaxed(0);
+  _failure_waste.store_relaxed(0);
+}

void G1EvacStats::log_plab_allocation() {
  log_debug(gc, plab)("%s PLAB allocation: "
                      "allocated: %zuB, "
@@ -51,13 +62,13 @@ void G1EvacStats::log_plab_allocation() {
                      "failure used: %zuB, "
                      "failure wasted: %zuB",
                      _description,
-                     _region_end_waste * HeapWordSize,
-                     _regions_filled,
-                     _num_plab_filled,
-                     _direct_allocated * HeapWordSize,
-                     _num_direct_allocated,
-                     _failure_used * HeapWordSize,
-                     _failure_waste * HeapWordSize);
+                     region_end_waste() * HeapWordSize,
+                     regions_filled(),
+                     num_plab_filled(),
+                     direct_allocated() * HeapWordSize,
+                     num_direct_allocated(),
+                     failure_used() * HeapWordSize,
+                     failure_waste() * HeapWordSize);
}

void G1EvacStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
@@ -109,7 +120,7 @@ size_t G1EvacStats::compute_desired_plab_size() const {
  // threads do not allocate anything but a few rather large objects. In this
  // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
  // which is an okay reaction.
-  size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+  size_t const used_for_waste_calculation = used() > region_end_waste() ? used() - region_end_waste() : 0;

  size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
  return (size_t)((double)total_waste_allowed / (100 - G1LastPLABAverageOccupancy));
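For a feel of the sizing math (illustrative numbers only, not from the source): with used() = 1,000,000 words, region_end_waste() = 200,000 words, TargetPLABWastePct = 10 and G1LastPLABAverageOccupancy = 50, used_for_waste_calculation is 800,000 words, total_waste_allowed is 8,000,000, and the desired PLAB size comes out as 8,000,000 / (100 - 50) = 160,000 words.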
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/plab.hpp"
+#include "runtime/atomic.hpp"

// Records various memory allocation statistics gathered during evacuation. All sizes
// are in HeapWords.
@@ -36,30 +37,21 @@ class G1EvacStats : public PLABStats {
  AdaptiveWeightedAverage
          _net_plab_size_filter;  // Integrator with decay

-  size_t _region_end_waste;     // Number of words wasted due to skipping to the next region.
-  uint   _regions_filled;       // Number of regions filled completely.
-  size_t _num_plab_filled;      // Number of PLABs filled and retired.
-  size_t _direct_allocated;     // Number of words allocated directly into the regions.
-  size_t _num_direct_allocated; // Number of direct allocation attempts.
+  Atomic<size_t> _region_end_waste;     // Number of words wasted due to skipping to the next region.
+  Atomic<uint>   _regions_filled;       // Number of regions filled completely.
+  Atomic<size_t> _num_plab_filled;      // Number of PLABs filled and retired.
+  Atomic<size_t> _direct_allocated;     // Number of words allocated directly into the regions.
+  Atomic<size_t> _num_direct_allocated; // Number of direct allocation attempts.
  // Number of words in live objects remaining in regions that ultimately suffered an
  // evacuation failure. This is used in the regions when the regions are made old regions.
-  size_t _failure_used;
+  Atomic<size_t> _failure_used;
  // Number of words wasted in regions which failed evacuation. This is the sum of space
  // for objects successfully copied out of the regions (now dead space) plus waste at the
  // end of regions.
-  size_t _failure_waste;
+  Atomic<size_t> _failure_waste;

-  virtual void reset() {
-    PLABStats::reset();
-    _region_end_waste = 0;
-    _regions_filled = 0;
-    _num_plab_filled = 0;
-    _direct_allocated = 0;
-    _num_direct_allocated = 0;
-    _failure_used = 0;
-    _failure_waste = 0;
-  }
+  virtual void reset();

  void log_plab_allocation();
  void log_sizing(size_t calculated_words, size_t net_desired_words);
@@ -77,16 +69,16 @@ public:
  // Should be called at the end of a GC pause.
  void adjust_desired_plab_size();

-  uint regions_filled() const { return _regions_filled; }
-  size_t num_plab_filled() const { return _num_plab_filled; }
-  size_t region_end_waste() const { return _region_end_waste; }
-  size_t direct_allocated() const { return _direct_allocated; }
-  size_t num_direct_allocated() const { return _num_direct_allocated; }
+  uint regions_filled() const;
+  size_t num_plab_filled() const;
+  size_t region_end_waste() const;
+  size_t direct_allocated() const;
+  size_t num_direct_allocated() const;
  // Amount of space in heapwords used in the failing regions when an evacuation failure happens.
-  size_t failure_used() const { return _failure_used; }
+  size_t failure_used() const;
  // Amount of space in heapwords wasted (unused) in the failing regions when an evacuation failure happens.
-  size_t failure_waste() const { return _failure_waste; }
+  size_t failure_waste() const;

  inline void add_num_plab_filled(size_t value);
  inline void add_direct_allocated(size_t value);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,28 +27,54 @@
#include "gc/g1/g1EvacStats.hpp"
-#include "runtime/atomicAccess.hpp"

+inline uint G1EvacStats::regions_filled() const {
+  return _regions_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_plab_filled() const {
+  return _num_plab_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::region_end_waste() const {
+  return _region_end_waste.load_relaxed();
+}
+
+inline size_t G1EvacStats::direct_allocated() const {
+  return _direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_direct_allocated() const {
+  return _num_direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_used() const {
+  return _failure_used.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_waste() const {
+  return _failure_waste.load_relaxed();
+}

inline void G1EvacStats::add_direct_allocated(size_t value) {
-  AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed);
+  _direct_allocated.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_num_plab_filled(size_t value) {
-  AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed);
+  _num_plab_filled.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_num_direct_allocated(size_t value) {
-  AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed);
+  _num_direct_allocated.add_then_fetch(value, memory_order_relaxed);
}

inline void G1EvacStats::add_region_end_waste(size_t value) {
-  AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed);
-  AtomicAccess::inc(&_regions_filled, memory_order_relaxed);
+  _region_end_waste.add_then_fetch(value, memory_order_relaxed);
+  _regions_filled.add_then_fetch(1u, memory_order_relaxed);
}

inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-  AtomicAccess::add(&_failure_used, used, memory_order_relaxed);
-  AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed);
+  _failure_used.add_then_fetch(used, memory_order_relaxed);
+  _failure_waste.add_then_fetch(waste, memory_order_relaxed);
}

#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP
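Unlike the single-writer `inc_*` counters in G1ConcurrentRefineStats, these `add_*` updates race between GC workers, so they use an atomic read-modify-write. A standalone sketch of the difference with `std::atomic` (illustrative only, not HotSpot code):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cstddef>

struct EvacStatsSketch {
  std::atomic<size_t> _direct_allocated{0};

  // Mirrors add_direct_allocated(): many workers may add concurrently,
  // so a relaxed fetch_add keeps the sum exact without ordering cost.
  void add_direct_allocated(size_t words) {
    _direct_allocated.fetch_add(words, std::memory_order_relaxed);
  }
};
```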
@@ -134,10 +134,10 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_num_regions(), mtGC);
-  _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_num_regions(), mtGC);
+  _compaction_tops = NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _heap->max_num_regions(), mtGC);
  for (uint j = 0; j < heap->max_num_regions(); j++) {
    _live_stats[j].clear();
-    _compaction_tops[j] = nullptr;
+    ::new (&_compaction_tops[j]) Atomic<HeapWord*>{};
  }

  _partial_array_state_manager = new PartialArrayStateManager(_num_workers);
@@ -167,7 +167,7 @@ G1FullCollector::~G1FullCollector() {
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
-  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
+  FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}
@@ -96,7 +96,7 @@ class G1FullCollector : StackObj {
  G1FullGCHeapRegionAttr _region_attr_table;

-  HeapWord* volatile* _compaction_tops;
+  Atomic<HeapWord*>* _compaction_tops;

public:
  G1FullCollector(G1CollectedHeap* heap,
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -63,11 +63,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx)
}

void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) {
-  AtomicAccess::store(&_compaction_tops[r->hrm_index()], value);
+  _compaction_tops[r->hrm_index()].store_relaxed(value);
}

HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const {
-  return AtomicAccess::load(&_compaction_tops[r->hrm_index()]);
+  return _compaction_tops[r->hrm_index()].load_relaxed();
}

void G1FullCollector::set_has_compaction_targets() {
@@ -32,7 +32,7 @@
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -107,46 +107,48 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
  // Set of (unique) regions that can be added to concurrently.
  class G1DirtyRegions : public CHeapObj<mtGC> {
    uint* _buffer;
-    uint _cur_idx;
+    Atomic<uint> _cur_idx;
    size_t _max_reserved_regions;

-    bool* _contains;
+    Atomic<bool>* _contains;

  public:
    G1DirtyRegions(size_t max_reserved_regions) :
      _buffer(NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC)),
      _cur_idx(0),
      _max_reserved_regions(max_reserved_regions),
-      _contains(NEW_C_HEAP_ARRAY(bool, max_reserved_regions, mtGC)) {
+      _contains(NEW_C_HEAP_ARRAY(Atomic<bool>, max_reserved_regions, mtGC)) {
      reset();
    }

    ~G1DirtyRegions() {
      FREE_C_HEAP_ARRAY(uint, _buffer);
-      FREE_C_HEAP_ARRAY(bool, _contains);
+      FREE_C_HEAP_ARRAY(Atomic<bool>, _contains);
    }

    void reset() {
-      _cur_idx = 0;
-      ::memset(_contains, false, _max_reserved_regions * sizeof(bool));
+      _cur_idx.store_relaxed(0);
+      for (uint i = 0; i < _max_reserved_regions; i++) {
+        _contains[i].store_relaxed(false);
+      }
    }

-    uint size() const { return _cur_idx; }
+    uint size() const { return _cur_idx.load_relaxed(); }

    uint at(uint idx) const {
-      assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
+      assert(idx < size(), "Index %u beyond valid regions", idx);
      return _buffer[idx];
    }

    void add_dirty_region(uint region) {
-      if (_contains[region]) {
+      if (_contains[region].load_relaxed()) {
        return;
      }

-      bool marked_as_dirty = AtomicAccess::cmpxchg(&_contains[region], false, true) == false;
+      bool marked_as_dirty = _contains[region].compare_set(false, true);
      if (marked_as_dirty) {
-        uint allocated = AtomicAccess::fetch_then_add(&_cur_idx, 1u);
+        uint allocated = _cur_idx.fetch_then_add(1u);
        _buffer[allocated] = region;
      }
    }
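The shape of `add_dirty_region()` is a cheap relaxed read that filters duplicates, followed by a CAS so that exactly one thread appends each region. A standalone sketch with `std::atomic` (illustrative only; names invented):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cstdint>
#include <vector>

struct DirtyRegionsSketch {
  std::vector<std::atomic<bool>> _contains;  // value-initialized to false
  std::vector<uint32_t> _buffer;
  std::atomic<uint32_t> _cur_idx{0};

  explicit DirtyRegionsSketch(size_t n) : _contains(n), _buffer(n) {}

  void add_dirty_region(uint32_t region) {
    if (_contains[region].load(std::memory_order_relaxed)) {
      return;  // common case: already recorded, no CAS needed
    }
    bool expected = false;
    if (_contains[region].compare_exchange_strong(expected, true)) {
      // Only the winning thread claims a unique slot in the buffer.
      _buffer[_cur_idx.fetch_add(1)] = region;
    }
  }
};
```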
@@ -155,9 +157,11 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
    void merge(const G1DirtyRegions* other) {
      for (uint i = 0; i < other->size(); i++) {
        uint region = other->at(i);
-        if (!_contains[region]) {
-          _buffer[_cur_idx++] = region;
-          _contains[region] = true;
+        if (!_contains[region].load_relaxed()) {
+          uint cur = _cur_idx.load_relaxed();
+          _buffer[cur] = region;
+          _cur_idx.store_relaxed(cur + 1);
+          _contains[region].store_relaxed(true);
        }
      }
    }
@@ -173,7 +177,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
  class G1ClearCardTableTask : public G1AbstractSubTask {
    G1CollectedHeap* _g1h;
    G1DirtyRegions* _regions;
-    uint volatile _cur_dirty_regions;
+    Atomic<uint> _cur_dirty_regions;

    G1RemSetScanState* _scan_state;
@@ -210,8 +214,9 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
    void do_work(uint worker_id) override {
      const uint num_regions_per_worker = num_cards_per_worker / (uint)G1HeapRegion::CardsPerRegion;

-      while (_cur_dirty_regions < _regions->size()) {
-        uint next = AtomicAccess::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker);
+      uint cur = _cur_dirty_regions.load_relaxed();
+      while (cur < _regions->size()) {
+        uint next = _cur_dirty_regions.fetch_then_add(num_regions_per_worker);
        uint max = MIN2(next + num_regions_per_worker, _regions->size());

        for (uint i = next; i < max; i++) {
@@ -226,6 +231,7 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
          // old regions use it for old->collection set candidates, so they should not be cleared
          // either.
        }
+        cur = max;
      }
    }
  };
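The clearing loop is a standard chunk-claiming pattern: every worker bumps a shared cursor by a fixed stride and processes the half-open range it claimed. Sketched standalone (illustrative only, not HotSpot code):

```
// Illustrative sketch only, not HotSpot code.
#include <algorithm>
#include <atomic>
#include <cstdint>

void clear_dirty_regions(std::atomic<uint32_t>& cursor,
                         uint32_t num_regions,
                         uint32_t regions_per_worker) {
  uint32_t cur = cursor.load(std::memory_order_relaxed);
  while (cur < num_regions) {
    // Claim the next chunk; concurrent workers get disjoint ranges.
    uint32_t next = cursor.fetch_add(regions_per_worker);
    uint32_t max = std::min(next + regions_per_worker, num_regions);
    for (uint32_t i = next; i < max; i++) {
      // ... clear the card table range for region i ...
    }
    cur = max;  // mirrors the diff: re-test using the claimed end
  }
}
```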
@@ -1115,7 +1121,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
  bool _initial_evacuation;

-  volatile bool _fast_reclaim_handled;
+  Atomic<bool> _fast_reclaim_handled;

public:
  G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
@@ -1143,8 +1149,8 @@ public:
      // 1. eager-reclaim candidates
      if (_initial_evacuation &&
          g1h->has_humongous_reclaim_candidates() &&
-          !_fast_reclaim_handled &&
-          !AtomicAccess::cmpxchg(&_fast_reclaim_handled, false, true)) {
+          !_fast_reclaim_handled.load_relaxed() &&
+          _fast_reclaim_handled.compare_set(false, true)) {
        G1GCParPhaseTimesTracker subphase_x(p, G1GCPhaseTimes::MergeER, worker_id);
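The merged condition is a one-shot guard: the relaxed load keeps the already-handled case free of atomic traffic, and the CAS guarantees that at most one worker runs the eager-reclaim subphase. Standalone sketch (illustrative only, not HotSpot code):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>

std::atomic<bool> g_handled{false};

bool try_claim_one_shot() {
  if (g_handled.load(std::memory_order_relaxed)) {
    return false;  // fast path: somebody already claimed it
  }
  bool expected = false;
  return g_handled.compare_exchange_strong(expected, true);
}
```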
@@ -23,7 +23,6 @@
 */

#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
@@ -31,7 +31,6 @@
#include "gc/shared/oopStorageSet.hpp"
#include "memory/iterator.hpp"
#include "oops/access.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"

template <bool concurrent, bool is_const>
@@ -28,7 +28,6 @@
#include "memory/arena.hpp"
#include "nmt/memTag.hpp"
#include "oops/oopsHierarchy.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -28,7 +28,6 @@
#include "gc/shared/partialArrayTaskStepper.hpp"
#include "gc/shared/partialArrayState.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
@@ -25,7 +25,6 @@
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
@@ -32,7 +32,6 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"
@@ -26,7 +26,6 @@
#include "gc/shared/workerThread.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,6 @@ typedef ZValue<ZPerNUMAStorage, ZPartition> ZPerNUMAZPartition;
                                                                       \
  nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \
  nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \
-  volatile_nonstatic_field(ZForwarding, _ref_count, int) \
  nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding) \
  nonstatic_field(ZForwardingEntry, _entry, uint64_t) \
  nonstatic_field(ZAttachedArrayForForwarding, _length, const size_t)
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,9 @@
 */

#include "gc/z/zAbort.hpp"
-#include "runtime/atomicAccess.hpp"

-volatile bool ZAbort::_should_abort = false;
+Atomic<bool> ZAbort::_should_abort{};

void ZAbort::abort() {
-  AtomicAccess::store(&_should_abort, true);
+  _should_abort.store_relaxed(true);
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,11 @@
#define SHARE_GC_Z_ZABORT_HPP

#include "memory/allStatic.hpp"
+#include "runtime/atomic.hpp"

class ZAbort : public AllStatic {
private:
-  static volatile bool _should_abort;
+  static Atomic<bool> _should_abort;

public:
  static bool should_abort();
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,8 @@
#include "gc/z/zAbort.hpp"
-#include "runtime/atomicAccess.hpp"

inline bool ZAbort::should_abort() {
-  return AtomicAccess::load(&_should_abort);
+  return _should_abort.load_relaxed();
}

#endif // SHARE_GC_Z_ZABORT_INLINE_HPP
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/growableArray.hpp"
@@ -78,7 +78,9 @@ public:
template <typename T, bool Parallel>
class ZArrayIteratorImpl : public StackObj {
private:
-  size_t _next;
+  using NextType = std::conditional_t<Parallel, Atomic<size_t>, size_t>;
+
+  NextType _next;
  const size_t _end;
  const T* const _array;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
#include "gc/z/zArray.hpp"
#include "gc/z/zLock.inline.hpp"
-#include "runtime/atomicAccess.hpp"

template <typename T>
ZArraySlice<T>::ZArraySlice(T* data, int len)
@@ -130,7 +129,7 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next_serial(size_t* index) {

template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next_parallel(size_t* index) {
-  const size_t claimed_index = AtomicAccess::fetch_then_add(&_next, 1u, memory_order_relaxed);
+  const size_t claimed_index = _next.fetch_then_add(1u, memory_order_relaxed);

  if (claimed_index < _end) {
    *index = claimed_index;
@@ -177,7 +176,7 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next_if(T* elem, Function predicate
template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next_index(size_t* index) {
-  if (Parallel) {
+  if constexpr (Parallel) {
    return next_parallel(index);
  } else {
    return next_serial(index);
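`if constexpr` matters here because `_next` has a different type per instantiation (`Atomic<size_t>` vs plain `size_t`): the discarded branch is never instantiated against the wrong member type. A standalone sketch of that dispatch (illustrative only, not HotSpot code):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cstddef>
#include <type_traits>

template <bool Parallel>
class IteratorSketch {
  using NextType = std::conditional_t<Parallel, std::atomic<size_t>, size_t>;
  NextType _next{0};
  const size_t _end;

public:
  explicit IteratorSketch(size_t end) : _end(end) {}

  bool next(size_t* index) {
    size_t claimed;
    if constexpr (Parallel) {
      // Atomic claim; this branch is discarded (not instantiated)
      // for the serial case, where _next is a plain size_t.
      claimed = _next.fetch_add(1, std::memory_order_relaxed);
    } else {
      claimed = _next++;
    }
    if (claimed < _end) {
      *index = claimed;
      return true;
    }
    return false;
  }
};
```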
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/align.hpp"

//
@@ -50,7 +49,7 @@
//

bool ZForwarding::claim() {
-  return AtomicAccess::cmpxchg(&_claimed, false, true) == false;
+  return _claimed.compare_set(false, true);
}

void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) {
@@ -60,7 +59,7 @@ void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) {
  // Support for ZHeap::is_in checks of from-space objects
  // in a page that is in-place relocating
-  AtomicAccess::store(&_in_place_thread, Thread::current());
+  _in_place_thread.store_relaxed(Thread::current());
  _in_place_top_at_start = _page->top();
}

@@ -76,17 +75,17 @@ void ZForwarding::in_place_relocation_finish() {
  }

  // Disable relaxed ZHeap::is_in checks
-  AtomicAccess::store(&_in_place_thread, (Thread*)nullptr);
+  _in_place_thread.store_relaxed(nullptr);
}

bool ZForwarding::in_place_relocation_is_below_top_at_start(zoffset offset) const {
  // Only the relocating thread is allowed to know about the old relocation top.
-  return AtomicAccess::load(&_in_place_thread) == Thread::current() && offset < _in_place_top_at_start;
+  return _in_place_thread.load_relaxed() == Thread::current() && offset < _in_place_top_at_start;
}

bool ZForwarding::retain_page(ZRelocateQueue* queue) {
  for (;;) {
-    const int32_t ref_count = AtomicAccess::load_acquire(&_ref_count);
+    const int32_t ref_count = _ref_count.load_acquire();

    if (ref_count == 0) {
      // Released
@@ -101,7 +100,7 @@ bool ZForwarding::retain_page(ZRelocateQueue* queue) {
      return false;
    }

-    if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) {
+    if (_ref_count.compare_set(ref_count, ref_count + 1)) {
      // Retained
      return true;
    }
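`retain_page()` is an "increment unless released" reference-count loop: a zero count means the page is gone and must not be resurrected, so a plain fetch-and-add would be wrong. Standalone sketch (illustrative only, not HotSpot code; the negative-count in-place case is folded into the failure path):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cstdint>

bool retain(std::atomic<int32_t>& ref_count) {
  for (;;) {
    int32_t n = ref_count.load(std::memory_order_acquire);
    if (n <= 0) {
      // 0: released; < 0: claimed for in-place relocation.
      return false;
    }
    // Publish the increment only if nobody raced us; otherwise retry.
    if (ref_count.compare_exchange_weak(n, n + 1)) {
      return true;
    }
  }
}
```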
@@ -110,11 +109,11 @@
void ZForwarding::in_place_relocation_claim_page() {
  for (;;) {
-    const int32_t ref_count = AtomicAccess::load(&_ref_count);
+    const int32_t ref_count = _ref_count.load_relaxed();

    assert(ref_count > 0, "Invalid state");

    // Invert reference count
-    if (AtomicAccess::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) {
+    if (!_ref_count.compare_set(ref_count, -ref_count)) {
      continue;
    }
@@ -122,7 +121,7 @@ void ZForwarding::in_place_relocation_claim_page() {
    // and we have now claimed the page. Otherwise we wait until it is claimed.
    if (ref_count != 1) {
      ZLocker<ZConditionLock> locker(&_ref_lock);
-      while (AtomicAccess::load_acquire(&_ref_count) != -1) {
+      while (_ref_count.load_acquire() != -1) {
        _ref_lock.wait();
      }
    }
@@ -134,12 +133,12 @@
void ZForwarding::release_page() {
  for (;;) {
-    const int32_t ref_count = AtomicAccess::load(&_ref_count);
+    const int32_t ref_count = _ref_count.load_relaxed();

    assert(ref_count != 0, "Invalid state");

    if (ref_count > 0) {
      // Decrement reference count
-      if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) {
+      if (!_ref_count.compare_set(ref_count, ref_count - 1)) {
        continue;
      }
@@ -152,7 +151,7 @@
      }
    } else {
      // Increment reference count
-      if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) {
+      if (!_ref_count.compare_set(ref_count, ref_count + 1)) {
        continue;
      }
@@ -171,9 +170,9 @@
ZPage* ZForwarding::detach_page() {
  // Wait until released
-  if (AtomicAccess::load_acquire(&_ref_count) != 0) {
+  if (_ref_count.load_acquire() != 0) {
    ZLocker<ZConditionLock> locker(&_ref_lock);
-    while (AtomicAccess::load_acquire(&_ref_count) != 0) {
+    while (_ref_count.load_acquire() != 0) {
      _ref_lock.wait();
    }
  }
@@ -182,16 +181,16 @@ ZPage* ZForwarding::detach_page() {
}

ZPage* ZForwarding::page() {
-  assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached");
+  assert(_ref_count.load_relaxed() != 0, "The page has been released/detached");
  return _page;
}

void ZForwarding::mark_done() {
-  AtomicAccess::store(&_done, true);
+  _done.store_relaxed(true);
}

bool ZForwarding::is_done() const {
-  return AtomicAccess::load(&_done);
+  return _done.load_relaxed();
}

//
@@ -288,7 +287,7 @@ void ZForwarding::relocated_remembered_fields_publish() {
  // used to have remembered set entries. Now publish the fields to
  // the YC.
-  const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::published);
+  const ZPublishState res = _relocated_remembered_fields_state.compare_exchange(ZPublishState::none, ZPublishState::published);

  // none:      OK to publish
  // published: Not possible - this operation makes this transition
@@ -319,7 +318,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() {
  // Invariant: The page is being retained
  assert(ZGeneration::young()->is_phase_mark(), "Only called when");

-  const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::reject);
+  const ZPublishState res = _relocated_remembered_fields_state.compare_exchange(ZPublishState::none, ZPublishState::reject);

  // none:      OC has not completed relocation
  // published: OC has completed and published all relocated remembered fields
@@ -340,7 +339,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() {
    // OC relocation already collected and published fields
    // Still notify concurrent scanning and reject the collected data from the OC
-    const ZPublishState res2 = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::published, ZPublishState::reject);
+    const ZPublishState res2 = _relocated_remembered_fields_state.compare_exchange(ZPublishState::published, ZPublishState::reject);
    assert(res2 == ZPublishState::published, "Should not fail");

    log_debug(gc, remset)("Forwarding remset eager and reject: " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end()));
@@ -368,7 +367,7 @@
}

void ZForwarding::verify() const {
-  guarantee(_ref_count != 0, "Invalid reference count");
+  guarantee(_ref_count.load_relaxed() != 0, "Invalid reference count");
  guarantee(_page != nullptr, "Invalid page");

  uint32_t live_objects = 0;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "gc/z/zPageAge.hpp"
#include "gc/z/zPageType.hpp"
#include "gc/z/zVirtualMemory.hpp"
+#include "runtime/atomic.hpp"

class ObjectClosure;
class ZForwardingAllocator;
@@ -62,13 +63,13 @@ private:
  const uint32_t _partition_id;
  const ZPageAge _from_age;
  const ZPageAge _to_age;
-  volatile bool _claimed;
+  Atomic<bool> _claimed;
  mutable ZConditionLock _ref_lock;
-  volatile int32_t _ref_count;
+  Atomic<int32_t> _ref_count;
-  volatile bool _done;
+  Atomic<bool> _done;

  // Relocated remembered set fields support
-  volatile ZPublishState _relocated_remembered_fields_state;
+  Atomic<ZPublishState> _relocated_remembered_fields_state;
  PointerArray _relocated_remembered_fields_array;
  uint32_t _relocated_remembered_fields_publish_young_seqnum;
@@ -77,7 +78,7 @@ private:
  zoffset_end _in_place_top_at_start;

  // Debugging
-  volatile Thread* _in_place_thread;
+  Atomic<Thread*> _in_place_thread;

  ZForwardingEntry* entries() const;
  ZForwardingEntry at(ZForwardingCursor* cursor) const;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -196,7 +196,7 @@ void ZForwarding::oops_do_in_forwarded_via_table(Function function) {
}

inline bool ZForwarding::in_place_relocation() const {
-  assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached");
+  assert(_ref_count.load_relaxed() != 0, "The page has been released/detached");
  return _in_place;
}

@@ -307,7 +307,7 @@
  // Invariant: Page is being retained
  assert(ZGeneration::young()->is_phase_mark(), "Only called when");

-  const ZPublishState res = AtomicAccess::load(&_relocated_remembered_fields_state);
+  const ZPublishState res = _relocated_remembered_fields_state.load_relaxed();

  // none:      Gather remembered fields
  // published: Have already published fields - not possible since they haven't been
@@ -327,7 +327,7 @@
// Returns true iff the page is being (or about to be) relocated by the OC
// while the YC gathered the remembered fields of the "from" page.
inline bool ZForwarding::relocated_remembered_fields_is_concurrently_scanned() const {
-  return AtomicAccess::load(&_relocated_remembered_fields_state) == ZPublishState::reject;
+  return _relocated_remembered_fields_state.load_relaxed() == ZPublishState::reject;
}

template <typename Function>
@@ -335,7 +335,7 @@
  // Invariant: Page is not being retained
  assert(ZGeneration::young()->is_phase_mark(), "Only called when");

-  const ZPublishState res = AtomicAccess::load_acquire(&_relocated_remembered_fields_state);
+  const ZPublishState res = _relocated_remembered_fields_state.load_acquire();

  // none:      Nothing published - page had already been relocated before YC started
  // published: OC relocated and published relocated remembered fields
@@ -363,14 +363,14 @@
    // collection. Mark that it is unsafe (and unnecessary) to call scan_page
    // on the page in the page table.
    assert(res != ZPublishState::accept, "Unexpected");
-    AtomicAccess::store(&_relocated_remembered_fields_state, ZPublishState::reject);
+    _relocated_remembered_fields_state.store_relaxed(ZPublishState::reject);
  } else {
    log_debug(gc, remset)("scan_forwarding failed retain safe " PTR_FORMAT, untype(start()));

    // Guaranteed that the page was fully relocated and removed from page table.
    // Because of this we can signal to scan_page that any page found in page table
    // of the same slot as the current forwarding is a page that is safe to scan,
    // and in fact must be scanned.
-    AtomicAccess::store(&_relocated_remembered_fields_state, ZPublishState::accept);
+    _relocated_remembered_fields_state.store_relaxed(ZPublishState::accept);
  }
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@ ZForwardingAllocator::~ZForwardingAllocator() {
}

void ZForwardingAllocator::reset(size_t size) {
-  _start = _top = REALLOC_C_HEAP_ARRAY(char, _start, size, mtGC);
+  _start = REALLOC_C_HEAP_ARRAY(char, _start, size, mtGC);
+  _top.store_relaxed(_start);
  _end = _start + size;
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,13 +24,14 @@
#ifndef SHARE_GC_Z_ZFORWARDINGALLOCATOR_HPP
#define SHARE_GC_Z_ZFORWARDINGALLOCATOR_HPP

+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"

class ZForwardingAllocator {
private:
  char* _start;
  char* _end;
-  char* _top;
+  Atomic<char*> _top;

public:
  ZForwardingAllocator();
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/z/zForwardingAllocator.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"

inline size_t ZForwardingAllocator::size() const {
@@ -34,11 +33,11 @@ inline size_t ZForwardingAllocator::size() const {
}

inline bool ZForwardingAllocator::is_full() const {
-  return _top == _end;
+  return _top.load_relaxed() == _end;
}

inline void* ZForwardingAllocator::alloc(size_t size) {
-  char* const addr = AtomicAccess::fetch_then_add(&_top, size);
+  char* const addr = _top.fetch_then_add(size);
  assert(addr + size <= _end, "Allocation should never fail");
  return addr;
}
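`alloc()` above is a lock-free bump-pointer allocator: `fetch_then_add` hands every caller a disjoint slice of a buffer that `reset()` sized up front, which is why the assert can promise success. Standalone sketch (illustrative only, not HotSpot code):

```
// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cassert>
#include <cstddef>

class BumpAllocatorSketch {
  char* _end;
  std::atomic<char*> _top;

public:
  BumpAllocatorSketch(char* buffer, size_t size)
    : _end(buffer + size), _top(buffer) {}

  void* alloc(size_t size) {
    // fetch_add returns the old top, so concurrent callers get
    // non-overlapping [addr, addr + size) slices.
    char* addr = _top.fetch_add(static_cast<std::ptrdiff_t>(size),
                                std::memory_order_relaxed);
    assert(addr + size <= _end && "allocation should never fail");
    return addr;
  }
};
```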
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,6 @@
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiTagMap.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
@@ -298,33 +297,33 @@ bool ZGeneration::is_relocate_queue_active() const {
void ZGeneration::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _freed = 0;
-  _promoted = 0;
-  _compacted = 0;
+  _freed.store_relaxed(0u);
+  _promoted.store_relaxed(0u);
+  _compacted.store_relaxed(0u);
}

size_t ZGeneration::freed() const {
-  return _freed;
+  return _freed.load_relaxed();
}

void ZGeneration::increase_freed(size_t size) {
-  AtomicAccess::add(&_freed, size, memory_order_relaxed);
+  _freed.add_then_fetch(size, memory_order_relaxed);
}

size_t ZGeneration::promoted() const {
-  return _promoted;
+  return _promoted.load_relaxed();
}

void ZGeneration::increase_promoted(size_t size) {
-  AtomicAccess::add(&_promoted, size, memory_order_relaxed);
+  _promoted.add_then_fetch(size, memory_order_relaxed);
}

size_t ZGeneration::compacted() const {
-  return _compacted;
+  return _compacted.load_relaxed();
}

void ZGeneration::increase_compacted(size_t size) {
-  AtomicAccess::add(&_compacted, size, memory_order_relaxed);
+  _compacted.add_then_fetch(size, memory_order_relaxed);
}

ConcurrentGCTimer* ZGeneration::gc_timer() const {
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
#include "gc/z/zWeakRootsProcessor.hpp"
#include "gc/z/zWorkers.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"

class ThreadClosure;
class ZForwardingTable;
@@ -70,9 +71,9 @@ protected:
  ZRelocate _relocate;
  ZRelocationSet _relocation_set;

-  volatile size_t _freed;
-  volatile size_t _promoted;
-  volatile size_t _compacted;
+  Atomic<size_t> _freed;
+  Atomic<size_t> _promoted;
+  Atomic<size_t> _compacted;

  Phase _phase;
  uint32_t _seqnum;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,6 @@
#include "gc/z/zJNICritical.hpp" #include "gc/z/zJNICritical.hpp"
#include "gc/z/zLock.inline.hpp" #include "gc/z/zLock.inline.hpp"
#include "gc/z/zStat.hpp" #include "gc/z/zStat.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/interfaceSupport.inline.hpp" #include "runtime/interfaceSupport.inline.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
@ -46,22 +45,22 @@
static const ZStatCriticalPhase ZCriticalPhaseJNICriticalStall("JNI Critical Stall", false /* verbose */); static const ZStatCriticalPhase ZCriticalPhaseJNICriticalStall("JNI Critical Stall", false /* verbose */);
volatile int64_t ZJNICritical::_count; Atomic<int64_t> ZJNICritical::_count;
ZConditionLock* ZJNICritical::_lock; ZConditionLock* ZJNICritical::_lock;
void ZJNICritical::initialize() { void ZJNICritical::initialize() {
_count = 0; precond(_count.load_relaxed() == 0);
_lock = new ZConditionLock(); _lock = new ZConditionLock();
} }
void ZJNICritical::block() { void ZJNICritical::block() {
for (;;) { for (;;) {
const int64_t count = AtomicAccess::load_acquire(&_count); const int64_t count = _count.load_acquire();
if (count < 0) { if (count < 0) {
// Already blocked, wait until unblocked // Already blocked, wait until unblocked
ZLocker<ZConditionLock> locker(_lock); ZLocker<ZConditionLock> locker(_lock);
while (AtomicAccess::load_acquire(&_count) < 0) { while (_count.load_acquire() < 0) {
_lock->wait(); _lock->wait();
} }
@ -70,7 +69,7 @@ void ZJNICritical::block() {
} }
// Increment and invert count // Increment and invert count
if (AtomicAccess::cmpxchg(&_count, count, -(count + 1)) != count) { if (!_count.compare_set(count, -(count + 1))) {
continue; continue;
} }
@ -80,7 +79,7 @@ void ZJNICritical::block() {
if (count != 0) { if (count != 0) {
// Wait until blocked // Wait until blocked
ZLocker<ZConditionLock> locker(_lock); ZLocker<ZConditionLock> locker(_lock);
while (AtomicAccess::load_acquire(&_count) != -1) { while (_count.load_acquire() != -1) {
_lock->wait(); _lock->wait();
} }
} }
@ -91,18 +90,18 @@ void ZJNICritical::block() {
} }
void ZJNICritical::unblock() { void ZJNICritical::unblock() {
const int64_t count = AtomicAccess::load_acquire(&_count); const int64_t count = _count.load_acquire();
assert(count == -1, "Invalid count"); assert(count == -1, "Invalid count");
// Notify unblocked // Notify unblocked
ZLocker<ZConditionLock> locker(_lock); ZLocker<ZConditionLock> locker(_lock);
AtomicAccess::release_store(&_count, (int64_t)0); _count.release_store(0);
_lock->notify_all(); _lock->notify_all();
} }
void ZJNICritical::enter_inner(JavaThread* thread) { void ZJNICritical::enter_inner(JavaThread* thread) {
for (;;) { for (;;) {
const int64_t count = AtomicAccess::load_acquire(&_count); const int64_t count = _count.load_acquire();
if (count < 0) { if (count < 0) {
// Wait until unblocked // Wait until unblocked
@ -112,7 +111,7 @@ void ZJNICritical::enter_inner(JavaThread* thread) {
ThreadBlockInVM tbivm(thread); ThreadBlockInVM tbivm(thread);
ZLocker<ZConditionLock> locker(_lock); ZLocker<ZConditionLock> locker(_lock);
while (AtomicAccess::load_acquire(&_count) < 0) { while (_count.load_acquire() < 0) {
_lock->wait(); _lock->wait();
} }
@ -121,7 +120,7 @@ void ZJNICritical::enter_inner(JavaThread* thread) {
} }
// Increment count // Increment count
if (AtomicAccess::cmpxchg(&_count, count, count + 1) != count) { if (!_count.compare_set(count, count + 1)) {
continue; continue;
} }
@ -142,17 +141,17 @@ void ZJNICritical::enter(JavaThread* thread) {
void ZJNICritical::exit_inner() { void ZJNICritical::exit_inner() {
for (;;) { for (;;) {
const int64_t count = AtomicAccess::load_acquire(&_count); const int64_t count = _count.load_acquire();
assert(count != 0, "Invalid count"); assert(count != 0, "Invalid count");
if (count > 0) { if (count > 0) {
// No block in progress, decrement count // No block in progress, decrement count
if (AtomicAccess::cmpxchg(&_count, count, count - 1) != count) { if (!_count.compare_set(count, count - 1)) {
continue; continue;
} }
} else { } else {
// Block in progress, increment count // Block in progress, increment count
if (AtomicAccess::cmpxchg(&_count, count, count + 1) != count) { if (!_count.compare_set(count, count + 1)) {
continue; continue;
} }
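The compare_set calls above are CAS retry loops for the GC-versus-JNI handoff: a non-negative _count is the number of threads inside a critical region, and the GC blocks new entries by flipping the count to a negative value. A simplified standalone sketch of the enter/exit paths, assuming std::atomic semantics and eliding the condition-variable waiting shown in the hunk:

```
#include <atomic>
#include <cstdint>

// Simplified ZJNICritical-style counting protocol. count >= 0 means that
// many threads are inside a critical region; count < 0 means the GC has
// blocked entry. Waiting and notification are elided.
std::atomic<int64_t> g_count{0};

bool try_enter() {
  int64_t count = g_count.load(std::memory_order_acquire);
  while (count >= 0) {
    // compare_exchange plays the role of Atomic<T>::compare_set; on
    // failure it reloads the current value into count and we retry.
    if (g_count.compare_exchange_weak(count, count + 1,
                                      std::memory_order_acq_rel)) {
      return true;  // entered the critical region
    }
  }
  return false;  // blocked by the GC; a real caller would wait
}

void exit_region() {
  int64_t count = g_count.load(std::memory_order_acquire);
  // Only the unblocked (count > 0) path is sketched; the real code also
  // handles a block in progress by moving a negative count toward -1.
  while (count > 0 &&
         !g_count.compare_exchange_weak(count, count - 1,
                                        std::memory_order_acq_rel)) {
  }
}
```

Note the shape change in the migration: AtomicAccess::cmpxchg returned the witnessed value and the caller compared it, while compare_set returns a bool directly.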


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,14 +25,15 @@
#define SHARE_GC_Z_ZJNICRITICAL_HPP #define SHARE_GC_Z_ZJNICRITICAL_HPP
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
class JavaThread; class JavaThread;
class ZConditionLock; class ZConditionLock;
class ZJNICritical : public AllStatic { class ZJNICritical : public AllStatic {
private: private:
static volatile int64_t _count; static Atomic<int64_t> _count;
static ZConditionLock* _lock; static ZConditionLock* _lock;
static void enter_inner(JavaThread* thread); static void enter_inner(JavaThread* thread);
static void exit_inner(); static void exit_inner();


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
#include "gc/z/zStat.hpp" #include "gc/z/zStat.hpp"
#include "gc/z/zUtils.hpp" #include "gc/z/zUtils.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp" #include "utilities/powerOfTwo.hpp"
#include "utilities/spinYield.hpp" #include "utilities/spinYield.hpp"
@ -60,18 +59,18 @@ void ZLiveMap::reset(ZGenerationId id) {
// Multiple threads can enter here; make sure only one of them // Multiple threads can enter here; make sure only one of them
// resets the marking information while the others busy wait. // resets the marking information while the others busy wait.
for (uint32_t seqnum = AtomicAccess::load_acquire(&_seqnum); for (uint32_t seqnum = _seqnum.load_acquire();
seqnum != generation->seqnum(); seqnum != generation->seqnum();
seqnum = AtomicAccess::load_acquire(&_seqnum)) { seqnum = _seqnum.load_acquire()) {
if (seqnum != seqnum_initializing) { if (seqnum != seqnum_initializing) {
// No one has claimed initialization of the livemap yet // No one has claimed initialization of the livemap yet
if (AtomicAccess::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum) { if (_seqnum.compare_set(seqnum, seqnum_initializing)) {
// This thread claimed the initialization // This thread claimed the initialization
// Reset marking information // Reset marking information
_live_bytes = 0; _live_bytes.store_relaxed(0u);
_live_objects = 0; _live_objects.store_relaxed(0u);
// Clear segment claimed/live bits // Clear segment claimed/live bits
segment_live_bits().clear(); segment_live_bits().clear();
@ -81,13 +80,13 @@ void ZLiveMap::reset(ZGenerationId id) {
// a bit is about to be set for the first time. // a bit is about to be set for the first time.
initialize_bitmap(); initialize_bitmap();
assert(_seqnum == seqnum_initializing, "Invalid"); assert(_seqnum.load_relaxed() == seqnum_initializing, "Invalid");
// Make sure the newly reset marking information is ordered // Make sure the newly reset marking information is ordered
// before the update of the page seqnum, such that when the // before the update of the page seqnum, such that when the
// up-to-date seqnum is load acquired, the bit maps will not // up-to-date seqnum is load acquired, the bit maps will not
// contain stale information. // contain stale information.
AtomicAccess::release_store(&_seqnum, generation->seqnum()); _seqnum.release_store(generation->seqnum());
break; break;
} }
} }
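The livemap reset above is a claim-initialize-publish protocol: one thread CASes _seqnum to a sentinel, resets the marking data, then release-stores the real seqnum so that a reader which load-acquires an up-to-date seqnum is guaranteed to see the reset bitmaps. A minimal sketch of the publication pattern (kInitializing and g_data are invented stand-ins):

```
#include <atomic>
#include <cstdint>

constexpr uint32_t kInitializing = UINT32_MAX;

std::atomic<uint32_t> g_seqnum{0};
int g_data = 0;  // stands in for the livemap bitmaps and counters

void reset_to(uint32_t new_seqnum) {
  uint32_t seen = g_seqnum.load(std::memory_order_acquire);
  while (seen != new_seqnum) {
    if (seen != kInitializing &&
        g_seqnum.compare_exchange_strong(seen, kInitializing,
                                         std::memory_order_acq_rel)) {
      g_data = 42;  // initialize the protected state
      // Publish: a reader that acquires new_seqnum must also see g_data.
      g_seqnum.store(new_seqnum, std::memory_order_release);
      return;
    }
    seen = g_seqnum.load(std::memory_order_acquire);  // busy wait
  }
}

bool is_current(uint32_t expected) {
  // Pairs with the release store in reset_to.
  return g_seqnum.load(std::memory_order_acquire) == expected;
}
```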


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/z/zBitMap.hpp" #include "gc/z/zBitMap.hpp"
#include "gc/z/zGenerationId.hpp" #include "gc/z/zGenerationId.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
class ObjectClosure; class ObjectClosure;
@ -41,9 +42,9 @@ private:
const uint32_t _segment_size; const uint32_t _segment_size;
const int _segment_shift; const int _segment_shift;
volatile uint32_t _seqnum; Atomic<uint32_t> _seqnum;
volatile uint32_t _live_objects; Atomic<uint32_t> _live_objects;
volatile size_t _live_bytes; Atomic<size_t> _live_bytes;
BitMap::bm_word_t _segment_live_bits; BitMap::bm_word_t _segment_live_bits;
BitMap::bm_word_t _segment_claim_bits; BitMap::bm_word_t _segment_claim_bits;
ZBitMap _bitmap; ZBitMap _bitmap;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,24 +31,23 @@
#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zMark.hpp" #include "gc/z/zMark.hpp"
#include "gc/z/zUtils.inline.hpp" #include "gc/z/zUtils.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp" #include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
inline void ZLiveMap::reset() { inline void ZLiveMap::reset() {
_seqnum = 0; _seqnum.store_relaxed(0u);
} }
inline bool ZLiveMap::is_marked(ZGenerationId id) const { inline bool ZLiveMap::is_marked(ZGenerationId id) const {
return AtomicAccess::load_acquire(&_seqnum) == ZGeneration::generation(id)->seqnum(); return _seqnum.load_acquire() == ZGeneration::generation(id)->seqnum();
} }
inline uint32_t ZLiveMap::live_objects() const { inline uint32_t ZLiveMap::live_objects() const {
return _live_objects; return _live_objects.load_relaxed();
} }
inline size_t ZLiveMap::live_bytes() const { inline size_t ZLiveMap::live_bytes() const {
return _live_bytes; return _live_bytes.load_relaxed();
} }
inline const BitMapView ZLiveMap::segment_live_bits() const { inline const BitMapView ZLiveMap::segment_live_bits() const {
@ -116,8 +115,8 @@ inline bool ZLiveMap::set(ZGenerationId id, BitMap::idx_t index, bool finalizabl
} }
inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) { inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
AtomicAccess::add(&_live_objects, objects); _live_objects.add_then_fetch(objects);
AtomicAccess::add(&_live_bytes, bytes); _live_bytes.add_then_fetch(bytes);
} }
inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const { inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#define SHARE_GC_Z_ZLOCK_HPP #define SHARE_GC_Z_ZLOCK_HPP
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
class ZLock : public CHeapObj<mtGC> { class ZLock : public CHeapObj<mtGC> {
@ -39,9 +40,9 @@ public:
class ZReentrantLock { class ZReentrantLock {
private: private:
ZLock _lock; ZLock _lock;
Thread* volatile _owner; Atomic<Thread*> _owner;
uint64_t _count; uint64_t _count;
public: public:
ZReentrantLock(); ZReentrantLock();


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#include "gc/z/zLock.hpp" #include "gc/z/zLock.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/javaThread.hpp" #include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp" #include "runtime/os.inline.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
@ -50,11 +49,11 @@ inline ZReentrantLock::ZReentrantLock()
inline void ZReentrantLock::lock() { inline void ZReentrantLock::lock() {
Thread* const thread = Thread::current(); Thread* const thread = Thread::current();
Thread* const owner = AtomicAccess::load(&_owner); Thread* const owner = _owner.load_relaxed();
if (owner != thread) { if (owner != thread) {
_lock.lock(); _lock.lock();
AtomicAccess::store(&_owner, thread); _owner.store_relaxed(thread);
} }
_count++; _count++;
@ -67,14 +66,14 @@ inline void ZReentrantLock::unlock() {
_count--; _count--;
if (_count == 0) { if (_count == 0) {
AtomicAccess::store(&_owner, (Thread*)nullptr); _owner.store_relaxed(nullptr);
_lock.unlock(); _lock.unlock();
} }
} }
inline bool ZReentrantLock::is_owned() const { inline bool ZReentrantLock::is_owned() const {
Thread* const thread = Thread::current(); Thread* const thread = Thread::current();
Thread* const owner = AtomicAccess::load(&_owner); Thread* const owner = _owner.load_relaxed();
return owner == thread; return owner == thread;
} }
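In the ZReentrantLock hunk above, _owner moves to Atomic<Thread*> with relaxed loads and stores. Relaxed is sufficient because the only comparisons that matter are against the current thread: a thread can read itself back as owner only if it stored itself earlier while holding the lock. A standalone sketch with std::thread::id standing in for Thread* (illustrative, not HotSpot's types):

```
#include <atomic>
#include <cstdint>
#include <mutex>
#include <thread>

class ReentrantLock {
  std::mutex                   _lock;
  std::atomic<std::thread::id> _owner{};
  uint64_t                     _count = 0;  // touched only by the owner

public:
  void lock() {
    const std::thread::id self = std::this_thread::get_id();
    if (_owner.load(std::memory_order_relaxed) != self) {
      _lock.lock();
      _owner.store(self, std::memory_order_relaxed);
    }
    _count++;
  }

  void unlock() {
    if (--_count == 0) {
      _owner.store(std::thread::id(), std::memory_order_relaxed);
      _lock.unlock();
    }
  }

  bool is_owned() const {
    return _owner.load(std::memory_order_relaxed) ==
           std::this_thread::get_id();
  }
};
```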


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,6 @@
#include "memory/iterator.inline.hpp" #include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp" #include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuation.hpp" #include "runtime/continuation.hpp"
#include "runtime/handshake.hpp" #include "runtime/handshake.hpp"
#include "runtime/javaThread.hpp" #include "runtime/javaThread.hpp"
@ -152,13 +151,14 @@ void ZMark::prepare_work() {
_terminate.reset(_nworkers); _terminate.reset(_nworkers);
// Reset flush counters // Reset flush counters
_work_nproactiveflush = _work_nterminateflush = 0; _work_nproactiveflush.store_relaxed(0u);
_work_nterminateflush.store_relaxed(0u);
} }
void ZMark::finish_work() { void ZMark::finish_work() {
// Accumulate proactive/terminate flush counters // Accumulate proactive/terminate flush counters
_nproactiveflush += _work_nproactiveflush; _nproactiveflush += _work_nproactiveflush.load_relaxed();
_nterminateflush += _work_nterminateflush; _nterminateflush += _work_nterminateflush.load_relaxed();
} }
void ZMark::follow_work_complete() { void ZMark::follow_work_complete() {
@ -594,7 +594,7 @@ bool ZMark::flush() {
} }
bool ZMark::try_terminate_flush() { bool ZMark::try_terminate_flush() {
AtomicAccess::inc(&_work_nterminateflush); _work_nterminateflush.add_then_fetch(1u);
_terminate.set_resurrected(false); _terminate.set_resurrected(false);
if (ZVerifyMarking) { if (ZVerifyMarking) {
@ -610,12 +610,12 @@ bool ZMark::try_proactive_flush() {
return false; return false;
} }
if (AtomicAccess::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) { if (_work_nproactiveflush.load_relaxed() == ZMarkProactiveFlushMax) {
// Limit reached or we're trying to terminate // Limit reached or we're trying to terminate
return false; return false;
} }
AtomicAccess::inc(&_work_nproactiveflush); _work_nproactiveflush.add_then_fetch(1u);
SuspendibleThreadSetLeaver sts_leaver; SuspendibleThreadSetLeaver sts_leaver;
return flush(); return flush();


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "gc/z/zMarkStackEntry.hpp" #include "gc/z/zMarkStackEntry.hpp"
#include "gc/z/zMarkTerminate.hpp" #include "gc/z/zMarkTerminate.hpp"
#include "oops/oopsHierarchy.hpp" #include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
class Thread; class Thread;
@ -60,8 +61,8 @@ private:
ZMarkingSMR _marking_smr; ZMarkingSMR _marking_smr;
ZMarkStripeSet _stripes; ZMarkStripeSet _stripes;
ZMarkTerminate _terminate; ZMarkTerminate _terminate;
volatile size_t _work_nproactiveflush; Atomic<size_t> _work_nproactiveflush;
volatile size_t _work_nterminateflush; Atomic<size_t> _work_nterminateflush;
size_t _nproactiveflush; size_t _nproactiveflush;
size_t _nterminateflush; size_t _nterminateflush;
size_t _ntrycomplete; size_t _ntrycomplete;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#define SHARE_GC_Z_ZMARKTERMINATE_HPP #define SHARE_GC_Z_ZMARKTERMINATE_HPP
#include "gc/z/zLock.hpp" #include "gc/z/zLock.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
class ZMarkStripeSet; class ZMarkStripeSet;
@ -32,9 +33,9 @@ class ZMarkStripeSet;
class ZMarkTerminate { class ZMarkTerminate {
private: private:
uint _nworkers; uint _nworkers;
volatile uint _nworking; Atomic<uint> _nworking;
volatile uint _nawakening; Atomic<uint> _nawakening;
volatile bool _resurrected; Atomic<bool> _resurrected;
ZConditionLock _lock; ZConditionLock _lock;
void maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes); void maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes);


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/z/zLock.inline.hpp" #include "gc/z/zLock.inline.hpp"
#include "gc/z/zMarkStack.hpp" #include "gc/z/zMarkStack.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/osThread.hpp" #include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
@ -42,24 +41,23 @@ inline ZMarkTerminate::ZMarkTerminate()
_lock() {} _lock() {}
inline void ZMarkTerminate::reset(uint nworkers) { inline void ZMarkTerminate::reset(uint nworkers) {
AtomicAccess::store(&_nworkers, nworkers); _nworkers = nworkers;
AtomicAccess::store(&_nworking, nworkers); _nworking.store_relaxed(nworkers);
_nawakening = 0; _nawakening.store_relaxed(0u);
} }
inline void ZMarkTerminate::leave() { inline void ZMarkTerminate::leave() {
SuspendibleThreadSetLeaver sts_leaver; SuspendibleThreadSetLeaver sts_leaver;
ZLocker<ZConditionLock> locker(&_lock); ZLocker<ZConditionLock> locker(&_lock);
AtomicAccess::store(&_nworking, _nworking - 1); if (_nworking.sub_then_fetch(1u, memory_order_relaxed) == 0) {
if (_nworking == 0) {
// Last thread leaving; notify waiters // Last thread leaving; notify waiters
_lock.notify_all(); _lock.notify_all();
} }
} }
inline void ZMarkTerminate::maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes) { inline void ZMarkTerminate::maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes) {
size_t nstripes = stripes->nstripes(); const size_t nstripes = stripes->nstripes();
if (used_nstripes == nstripes && nstripes > 1u) { if (used_nstripes == nstripes && nstripes > 1u) {
stripes->try_set_nstripes(nstripes, nstripes >> 1); stripes->try_set_nstripes(nstripes, nstripes >> 1);
} }
@ -69,8 +67,7 @@ inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_n
SuspendibleThreadSetLeaver sts_leaver; SuspendibleThreadSetLeaver sts_leaver;
ZLocker<ZConditionLock> locker(&_lock); ZLocker<ZConditionLock> locker(&_lock);
AtomicAccess::store(&_nworking, _nworking - 1); if (_nworking.sub_then_fetch(1u, memory_order_relaxed) == 0) {
if (_nworking == 0) {
// Last thread entering termination: success // Last thread entering termination: success
_lock.notify_all(); _lock.notify_all();
return true; return true;
@ -83,24 +80,24 @@ inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_n
// We either got notification about more work // We either got notification about more work
// or got a spurious wakeup; don't terminate // or got a spurious wakeup; don't terminate
if (_nawakening > 0) { if (_nawakening.load_relaxed() > 0) {
AtomicAccess::store(&_nawakening, _nawakening - 1); _nawakening.sub_then_fetch(1u, memory_order_relaxed);
} }
if (_nworking == 0) { if (_nworking.load_relaxed() == 0) {
// We got notified all work is done; terminate // We got notified all work is done; terminate
return true; return true;
} }
AtomicAccess::store(&_nworking, _nworking + 1); _nworking.add_then_fetch(1u, memory_order_relaxed);
return false; return false;
} }
inline void ZMarkTerminate::wake_up() { inline void ZMarkTerminate::wake_up() {
uint nworking = AtomicAccess::load(&_nworking); const uint nworking = _nworking.load_relaxed();
uint nawakening = AtomicAccess::load(&_nawakening); const uint nawakening = _nawakening.load_relaxed();
if (nworking + nawakening == AtomicAccess::load(&_nworkers)) { if (nworking + nawakening == _nworkers) {
// Everyone is working or about to // Everyone is working or about to
return; return;
} }
@ -111,24 +108,24 @@ inline void ZMarkTerminate::wake_up() {
} }
ZLocker<ZConditionLock> locker(&_lock); ZLocker<ZConditionLock> locker(&_lock);
if (_nworking + _nawakening != _nworkers) { if (_nworking.load_relaxed() + _nawakening.load_relaxed() != _nworkers) {
// Not everyone is working // Not everyone is working
AtomicAccess::store(&_nawakening, _nawakening + 1); _nawakening.add_then_fetch(1u, memory_order_relaxed);
_lock.notify(); _lock.notify();
} }
} }
inline bool ZMarkTerminate::saturated() const { inline bool ZMarkTerminate::saturated() const {
uint nworking = AtomicAccess::load(&_nworking); const uint nworking = _nworking.load_relaxed();
uint nawakening = AtomicAccess::load(&_nawakening); const uint nawakening = _nawakening.load_relaxed();
return nworking + nawakening == AtomicAccess::load(&_nworkers); return nworking + nawakening == _nworkers;
} }
inline void ZMarkTerminate::set_resurrected(bool value) { inline void ZMarkTerminate::set_resurrected(bool value) {
// Update resurrected if it changed // Update resurrected if it changed
if (resurrected() != value) { if (resurrected() != value) {
AtomicAccess::store(&_resurrected, value); _resurrected.store_relaxed(value);
if (value) { if (value) {
log_debug(gc, marking)("Resurrection broke termination"); log_debug(gc, marking)("Resurrection broke termination");
} else { } else {
@ -138,7 +135,7 @@ inline void ZMarkTerminate::set_resurrected(bool value) {
} }
inline bool ZMarkTerminate::resurrected() const { inline bool ZMarkTerminate::resurrected() const {
return AtomicAccess::load(&_resurrected); return _resurrected.load_relaxed();
} }
#endif // SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP #endif // SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP
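A notable non-mechanical change above: leave and try_terminate previously stored _nworking - 1 and then re-read the field, while the new code folds decrement and zero test into one sub_then_fetch call, so the check is made on the value this thread itself produced. A minimal sketch of the last-worker-notifies idiom, with standard types standing in for ZGC's:

```
#include <atomic>
#include <condition_variable>
#include <mutex>

std::mutex              g_mutex;
std::condition_variable g_cv;
std::atomic<unsigned>   g_nworking{0};

void worker_leave() {
  std::unique_lock<std::mutex> lock(g_mutex);
  // fetch_sub returns the old value; subtracting 1 mirrors
  // Atomic<T>::sub_then_fetch. Exactly one thread sees the zero.
  if (g_nworking.fetch_sub(1, std::memory_order_relaxed) - 1 == 0) {
    g_cv.notify_all();  // last thread leaving; notify waiters
  }
}

void wait_for_idle() {
  std::unique_lock<std::mutex> lock(g_mutex);
  g_cv.wait(lock, [] {
    return g_nworking.load(std::memory_order_relaxed) == 0;
  });
}
```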


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,6 @@
#include "gc/z/zNMethodTableEntry.hpp" #include "gc/z/zNMethodTableEntry.hpp"
#include "gc/z/zNMethodTableIteration.hpp" #include "gc/z/zNMethodTableIteration.hpp"
#include "memory/iterator.hpp" #include "memory/iterator.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
@ -42,11 +41,11 @@ void ZNMethodTableIteration::nmethods_do_begin(ZNMethodTableEntry* table, size_t
_table = table; _table = table;
_size = size; _size = size;
_claimed = 0; _claimed.store_relaxed(0u);
} }
void ZNMethodTableIteration::nmethods_do_end() { void ZNMethodTableIteration::nmethods_do_end() {
assert(_claimed >= _size, "Failed to claim all table entries"); assert(_claimed.load_relaxed() >= _size, "Failed to claim all table entries");
// Finish iteration // Finish iteration
_table = nullptr; _table = nullptr;
@ -57,7 +56,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
// Claim table partition. Each partition is currently sized to span // Claim table partition. Each partition is currently sized to span
// two cache lines. This number is just a guess, but seems to work well. // two cache lines. This number is just a guess, but seems to work well.
const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry); const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
const size_t partition_start = MIN2(AtomicAccess::fetch_then_add(&_claimed, partition_size), _size); const size_t partition_start = MIN2(_claimed.fetch_then_add(partition_size), _size);
const size_t partition_end = MIN2(partition_start + partition_size, _size); const size_t partition_end = MIN2(partition_start + partition_size, _size);
if (partition_start == partition_end) { if (partition_start == partition_end) {
// End of table // End of table
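The claiming loop above is a standard parallel-iteration idiom: each worker atomically advances a shared cursor by a fixed partition size and clamps both ends to the table bounds, so partitions never overlap and every entry is visited exactly once. A hedged standalone sketch (partition size and names are illustrative):

```
#include <algorithm>
#include <atomic>
#include <cstddef>

// One-shot parallel iteration; g_claimed would be reset to 0 before
// each new round, as nmethods_do_begin does above.
std::atomic<size_t> g_claimed{0};

template <typename T, typename Fn>
void parallel_do(T* table, size_t size, Fn fn) {
  const size_t partition_size = 64;  // tuned, e.g. to ~2 cache lines
  for (;;) {
    const size_t start = std::min(
        g_claimed.fetch_add(partition_size, std::memory_order_relaxed),
        size);
    const size_t end = std::min(start + partition_size, size);
    if (start == end) {
      return;  // end of table
    }
    for (size_t i = start; i < end; i++) {
      fn(table[i]);
    }
  }
}
```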


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,15 +25,16 @@
#define SHARE_GC_Z_ZNMETHODTABLEITERATION_HPP #define SHARE_GC_Z_ZNMETHODTABLEITERATION_HPP
#include "gc/z/zGlobals.hpp" #include "gc/z/zGlobals.hpp"
#include "runtime/atomic.hpp"
class NMethodClosure; class NMethodClosure;
class ZNMethodTableEntry; class ZNMethodTableEntry;
class ZNMethodTableIteration { class ZNMethodTableIteration {
private: private:
ZNMethodTableEntry* _table; ZNMethodTableEntry* _table;
size_t _size; size_t _size;
ZCACHE_ALIGNED volatile size_t _claimed; ZCACHE_ALIGNED Atomic<size_t> _claimed;
bool in_progress() const; bool in_progress() const;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -64,31 +64,31 @@ ZRelocateQueue::ZRelocateQueue()
_needs_attention(0) {} _needs_attention(0) {}
bool ZRelocateQueue::needs_attention() const { bool ZRelocateQueue::needs_attention() const {
return AtomicAccess::load(&_needs_attention) != 0; return _needs_attention.load_relaxed() != 0;
} }
void ZRelocateQueue::inc_needs_attention() { void ZRelocateQueue::inc_needs_attention() {
const int needs_attention = AtomicAccess::add(&_needs_attention, 1); const int needs_attention = _needs_attention.add_then_fetch(1);
assert(needs_attention == 1 || needs_attention == 2, "Invalid state"); assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
} }
void ZRelocateQueue::dec_needs_attention() { void ZRelocateQueue::dec_needs_attention() {
const int needs_attention = AtomicAccess::sub(&_needs_attention, 1); const int needs_attention = _needs_attention.sub_then_fetch(1);
assert(needs_attention == 0 || needs_attention == 1, "Invalid state"); assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
} }
void ZRelocateQueue::activate(uint nworkers) { void ZRelocateQueue::activate(uint nworkers) {
_is_active = true; _is_active.store_relaxed(true);
join(nworkers); join(nworkers);
} }
void ZRelocateQueue::deactivate() { void ZRelocateQueue::deactivate() {
AtomicAccess::store(&_is_active, false); _is_active.store_relaxed(false);
clear(); clear();
} }
bool ZRelocateQueue::is_active() const { bool ZRelocateQueue::is_active() const {
return AtomicAccess::load(&_is_active); return _is_active.load_relaxed();
} }
void ZRelocateQueue::join(uint nworkers) { void ZRelocateQueue::join(uint nworkers) {
@ -453,7 +453,7 @@ static void retire_target_page(ZGeneration* generation, ZPage* page) {
class ZRelocateSmallAllocator { class ZRelocateSmallAllocator {
private: private:
ZGeneration* const _generation; ZGeneration* const _generation;
volatile size_t _in_place_count; Atomic<size_t> _in_place_count;
public: public:
ZRelocateSmallAllocator(ZGeneration* generation) ZRelocateSmallAllocator(ZGeneration* generation)
@ -463,7 +463,7 @@ public:
ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) { ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
ZPage* const page = alloc_page(forwarding); ZPage* const page = alloc_page(forwarding);
if (page == nullptr) { if (page == nullptr) {
AtomicAccess::inc(&_in_place_count); _in_place_count.add_then_fetch(1u);
} }
if (target != nullptr) { if (target != nullptr) {
@ -493,7 +493,7 @@ public:
} }
size_t in_place_count() const { size_t in_place_count() const {
return _in_place_count; return _in_place_count.load_relaxed();
} }
}; };
@ -503,7 +503,7 @@ private:
ZConditionLock _lock; ZConditionLock _lock;
ZRelocationTargets* _shared_targets; ZRelocationTargets* _shared_targets;
bool _in_place; bool _in_place;
volatile size_t _in_place_count; Atomic<size_t> _in_place_count;
public: public:
ZRelocateMediumAllocator(ZGeneration* generation, ZRelocationTargets* shared_targets) ZRelocateMediumAllocator(ZGeneration* generation, ZRelocationTargets* shared_targets)
@ -539,7 +539,7 @@ public:
ZPage* const to_page = alloc_page(forwarding); ZPage* const to_page = alloc_page(forwarding);
_shared_targets->set(partition_id, to_age, to_page); _shared_targets->set(partition_id, to_age, to_page);
if (to_page == nullptr) { if (to_page == nullptr) {
AtomicAccess::inc(&_in_place_count); _in_place_count.add_then_fetch(1u);
_in_place = true; _in_place = true;
} }
@ -579,7 +579,7 @@ public:
} }
size_t in_place_count() const { size_t in_place_count() const {
return _in_place_count; return _in_place_count.load_relaxed();
} }
}; };
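The inc_needs_attention/dec_needs_attention pair earlier in this file shows another shape change in the migration: AtomicAccess::add and AtomicAccess::sub become add_then_fetch and sub_then_fetch, and the returned (new) value is asserted to keep _needs_attention within its expected 0..2 range. A small sketch of asserting transitions on the returned value:

```
#include <atomic>
#include <cassert>

// At most two independent sources can raise attention at once, so the
// counter must stay within 0..2; the value returned by each atomic op
// is asserted to prove it.
std::atomic<int> g_needs_attention{0};

void inc_needs_attention() {
  // fetch_add returns the old value; +1 mirrors add_then_fetch.
  const int v =
      g_needs_attention.fetch_add(1, std::memory_order_relaxed) + 1;
  assert((v == 1 || v == 2) && "Invalid state");
}

void dec_needs_attention() {
  const int v =
      g_needs_attention.fetch_sub(1, std::memory_order_relaxed) - 1;
  assert((v == 0 || v == 1) && "Invalid state");
}

bool needs_attention() {
  return g_needs_attention.load(std::memory_order_relaxed) != 0;
}
```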


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/z/zPageAge.hpp" #include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocationSet.hpp" #include "gc/z/zRelocationSet.hpp"
#include "gc/z/zValue.hpp" #include "gc/z/zValue.hpp"
#include "runtime/atomic.hpp"
class ZForwarding; class ZForwarding;
class ZGeneration; class ZGeneration;
@ -42,8 +43,8 @@ private:
uint _nworkers; uint _nworkers;
uint _nsynchronized; uint _nsynchronized;
bool _synchronize; bool _synchronize;
volatile bool _is_active; Atomic<bool> _is_active;
volatile int _needs_attention; Atomic<int> _needs_attention;
bool needs_attention() const; bool needs_attention() const;
void inc_needs_attention(); void inc_needs_attention();


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,24 +23,23 @@
#include "gc/z/zTLABUsage.hpp" #include "gc/z/zTLABUsage.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
ZTLABUsage::ZTLABUsage() ZTLABUsage::ZTLABUsage()
: _used(0), : _used(0),
_used_history() {} _used_history() {}
void ZTLABUsage::increase_used(size_t size) { void ZTLABUsage::increase_used(size_t size) {
AtomicAccess::add(&_used, size, memory_order_relaxed); _used.add_then_fetch(size, memory_order_relaxed);
} }
void ZTLABUsage::decrease_used(size_t size) { void ZTLABUsage::decrease_used(size_t size) {
precond(size <= _used); precond(size <= _used.load_relaxed());
AtomicAccess::sub(&_used, size, memory_order_relaxed); _used.sub_then_fetch(size, memory_order_relaxed);
} }
void ZTLABUsage::reset() { void ZTLABUsage::reset() {
const size_t used = AtomicAccess::xchg(&_used, (size_t) 0); const size_t used = _used.exchange(0u);
// Avoid updates when nothing has been allocated since the last YC // Avoid updates when nothing has been allocated since the last YC
if (used == 0) { if (used == 0) {
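ZTLABUsage::reset above relies on an atomic exchange to snapshot the accumulated usage and zero it in one step, so a concurrent increase_used can never be lost between the read and the reset. A minimal sketch of the read-and-clear idiom:

```
#include <atomic>
#include <cstddef>

class UsageCounter {
  std::atomic<size_t> _used{0};

public:
  void add(size_t size) {
    _used.fetch_add(size, std::memory_order_relaxed);
  }

  // Atomically returns the accumulated value and resets it to zero;
  // concurrent add() calls land either before or after the snapshot.
  size_t take() {
    return _used.exchange(0, std::memory_order_relaxed);
  }
};
```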


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#ifndef SHARE_GC_Z_ZTLABUSAGE_HPP #ifndef SHARE_GC_Z_ZTLABUSAGE_HPP
#define SHARE_GC_Z_ZTLABUSAGE_HPP #define SHARE_GC_Z_ZTLABUSAGE_HPP
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
#include "utilities/numberSeq.hpp" #include "utilities/numberSeq.hpp"
@ -42,9 +43,9 @@
class ZTLABUsage { class ZTLABUsage {
private: private:
// Accounting TLAB used until the next GC cycle // Accounting TLAB used until the next GC cycle
volatile size_t _used; Atomic<size_t> _used;
// Sequence of historic used values // Sequence of historic used values
TruncatedSeq _used_history; TruncatedSeq _used_history;
public: public:
ZTLABUsage(); ZTLABUsage();


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -192,18 +192,20 @@ void BytecodeTracer::trace_interpreter(const methodHandle& method, address bcp,
} }
#endif #endif
void BytecodeTracer::print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags) { void BytecodeTracer::print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags, bool buffered) {
BytecodePrinter method_printer(flags); BytecodePrinter method_printer(flags);
BytecodeStream s(method); BytecodeStream s(method);
s.set_interval(from, to); s.set_interval(from, to);
// Keep output to st coherent: collect all lines and print at once.
ResourceMark rm; ResourceMark rm;
stringStream ss; stringStream ss;
outputStream* out = buffered ? &ss : st;
while (s.next() >= 0) { while (s.next() >= 0) {
method_printer.trace(method, s.bcp(), &ss); method_printer.trace(method, s.bcp(), out);
}
if (buffered) {
st->print("%s", ss.as_string());
} }
st->print("%s", ss.as_string());
} }
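The new buffered parameter above lets callers either collect all trace lines in a scratch stream and emit them as one contiguous block (keeping interleaved output coherent) or write straight through to the target stream line by line. A sketch of the same choice with standard streams; the function and its arguments are illustrative:

```
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

void print_lines(const std::vector<std::string>& lines,
                 std::ostream& st, bool buffered = true) {
  std::ostringstream ss;
  // Route output either into the local buffer or directly to st.
  std::ostream& out = buffered ? static_cast<std::ostream&>(ss) : st;
  for (const std::string& line : lines) {
    out << line << '\n';  // stands in for method_printer.trace(...)
  }
  if (buffered) {
    st << ss.str();  // a single write keeps the block contiguous
  }
}
```

Unbuffered mode trades coherence for memory: nothing is staged, which matters when tracing very large methods.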
void BytecodePrinter::print_constant(int cp_index, outputStream* st) { void BytecodePrinter::print_constant(int cp_index, outputStream* st) {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class BytecodeClosure;
class BytecodeTracer: AllStatic { class BytecodeTracer: AllStatic {
public: public:
NOT_PRODUCT(static void trace_interpreter(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);) NOT_PRODUCT(static void trace_interpreter(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);)
static void print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags); static void print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags, bool buffered = true);
}; };
#endif // SHARE_INTERPRETER_BYTECODETRACER_HPP #endif // SHARE_INTERPRETER_BYTECODETRACER_HPP

Some files were not shown because too many files have changed in this diff.