Compare commits


No commits in common. "master" and "jdk-27+5" have entirely different histories.

843 changed files with 15257 additions and 23609 deletions

View File

@@ -72,7 +72,6 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@@ -622,21 +621,6 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@@ -640,32 +640,6 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
### SCTP Tests
The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.
For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:
```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```
For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:
```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```
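Whether the installed library is actually usable can be checked from Java itself. Below is a minimal, hedged probe (not part of the test suite): in the JDK's `com.sun.nio.sctp` API, `SctpChannel.open()` throws `UnsupportedOperationException` when native SCTP support is missing:
```
import com.sun.nio.sctp.SctpChannel;

public class SctpProbe {
    public static void main(String[] args) throws Exception {
        try (SctpChannel ch = SctpChannel.open()) {
            System.out.println("SCTP is available; the SCTP tests will run.");
        } catch (UnsupportedOperationException e) {
            System.out.println("No SCTP support; the SCTP tests will be skipped.");
        }
    }
}
```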
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in

View File

@@ -69,18 +69,22 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
CHECK_AVAILABLE: [
if test x$TOOLCHAIN_TYPE = xmicrosoft; then
AVAILABLE=false
fi
],
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default],
IF_AUTO: [
RESULT=""
])
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)
if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -107,8 +111,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -128,8 +132,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi

View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,8 +61,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
zero-as-null-pointer-constant, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,15 +49,13 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
* <pre>
* &commat;jls chapter.section description
* &commat;jls preview-feature-chapter.section description
* &commat;jls section-number description
* </pre>
*
* For example:
*
* <pre>
* &commat;jls 3.4 Line Terminators
* &commat;jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
* </pre>
*
* will produce the following HTML, depending on the file containing
@@ -66,24 +64,10 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* <pre>{@code
* <dt>See <i>Java Language Specification</i>:
* <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
* <dd><a href="../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
* 5.7.1 Exact Testing Conversions</a><sup class="preview-mark">
* <a href="../../specs/jls/jls-1.html#jls-1.5.1">PREVIEW</a></sup>
* }</pre>
*
* In inline tags (note you need manual JLS/JVMS prefix):
* <pre>
* JLS {&commat;jls 3.4}
* </pre>
*
* produces (note the section sign and no trailing dot):
* <pre>
* JLS <a href="../../specs/jls/jls-3.html#jls-3.4">§3.4</a>
* </pre>
*
* Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
* been placed in the {@code specs} folder. These documents are not included
* in open-source repositories.
* Copies of JLS and JVMS are expected to have been placed in the {@code specs}
* folder. These documents are not included in open-source repositories.
*/
public class JSpec implements Taglet {
@@ -103,9 +87,9 @@ public class JSpec implements Taglet {
}
}
private final String tagName;
private final String specTitle;
private final String idPrefix;
private String tagName;
private String specTitle;
private String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@@ -114,7 +98,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
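As a quick illustration of what the two patterns capture (a standalone sketch, not part of the taglet): the named `preview` group is what distinguishes an ordinary reference from a preview-feature reference and selects between the two URL formats built further down.
```
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TagPatternDemo {
    // Same pattern as the extended TAG_PATTERN above.
    static final Pattern TAG_PATTERN = Pattern.compile(
            "(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");

    public static void main(String[] args) {
        // Prints: preview=null chapter=3 section=.4
        show("3.4 Line Terminators");
        // Prints: preview=primitive-types-in-patterns-instanceof-switch- chapter=5 section=.7.1
        show("primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions");
    }

    static void show(String tagText) {
        Matcher m = TAG_PATTERN.matcher(tagText);
        if (m.find()) {
            System.out.printf("preview=%s chapter=%s section=%s%n",
                    m.group("preview"), m.group("chapter"), m.group("section"));
        }
    }
}
```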
/**
* Returns the set of locations in which the tag may be used.
@@ -173,50 +157,19 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
// preview-feature-4.6 is preview-feature-, 4, .6
String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
String url = preview == null ?
String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section) :
String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section, preview);
var literal = expand(contents).trim();
var prefix = (preview == null ? "" : preview) + chapter + section;
if (literal.startsWith(prefix)) {
var hasFullTitle = literal.length() > prefix.length();
if (hasFullTitle) {
// Drop the preview identifier
literal = chapter + section + literal.substring(prefix.length());
} else {
// No section sign if the tag refers to a chapter, like {@jvms 4}
String sectionSign = section.isEmpty() ? "" : "§";
// Change whole text to "§chapter.x" in inline tags.
literal = sectionSign + chapter + section;
}
}
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section);
sb.append("<a href=\"")
.append(url)
.append("\">")
.append(literal)
.append(expand(contents))
.append("</a>");
if (preview != null) {
// Add PREVIEW superscript that links to JLS/JVMS 1.5.1
// "Restrictions on the Use of Preview Features"
// Similar to how APIs link to the Preview info box warning
var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, "1", ".5.1");
sb.append("<sup class=\"preview-mark\"><a href=\"")
.append(sectionLink)
.append("\">PREVIEW</a></sup>");
}
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("<br>");
}

View File

@@ -31,14 +31,13 @@ include LibCommon.gmk
## Build libjaas
################################################################################
ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))
TARGETS += $(BUILD_LIBJAAS)
TARGETS += $(BUILD_LIBJAAS)
endif
################################################################################

View File

@@ -1218,11 +1218,43 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv) {
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
__ profile_receiver_type(recv, mdo, mdp_offset);
// Given a profile data offset, generate an Address which points to
// the corresponding slot in mdo->data().
// Clobbers rscratch2.
auto slot_at = [=](ByteSize offset) -> Address {
return __ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, offset),
LogBytesPerWord);
};
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
__ cmp(recv, rscratch1);
__ br(Assembler::NE, next_test);
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
DataLayout::counter_increment);
__ b(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
__ ldr(rscratch1, recv_addr);
__ cbnz(rscratch1, next_test);
__ str(recv, recv_addr);
__ mov(rscratch1, DataLayout::counter_increment);
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
__ b(*update_done);
__ bind(next_test);
}
}
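For readers less fluent in AArch64 assembly, the two loops above are the standard ReceiverTypeData update; a rough single-threaded Java model of the same bookkeeping (hypothetical names; the real table has `TypeProfileWidth` rows, and the total-count bump on fall-through is emitted by the caller after `update_done`):
```
// Rough Java model of the ReceiverTypeData update above (hypothetical names).
final class ReceiverRows {
    final Object[] receiver = new Object[2]; // rows; really TypeProfileWidth of them
    final long[] count = new long[2];
    long totalCount;                         // CounterData::count

    void update(Object recv) {
        // First loop: is the receiver already recorded? Then bump its row count.
        for (int i = 0; i < receiver.length; i++) {
            if (receiver[i] == recv) { count[i]++; return; }
        }
        // Second loop: claim the first empty row for a newly-seen receiver.
        for (int i = 0; i < receiver.length; i++) {
            if (receiver[i] == null) { receiver[i] = recv; count[i] = 1; return; }
        }
        // Fall-through: table full and receiver unknown -- the caller counts
        // this as the polymorphic case (the addptr on counter_addr).
        totalCount++;
    }
}
```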
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@@ -1284,9 +1316,14 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
type_profile_helper(mdo, md, data, recv);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
} else {
__ cbz(obj, *obj_is_null);
}
@@ -1393,9 +1430,13 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
type_profile_helper(mdo, md, data, recv);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
} else {
__ cbz(value, done);
}
@@ -2499,9 +2540,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type.
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@@ -2509,13 +2554,36 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
// Receiver type is not found in profile data.
// Fall back to runtime helper to handle the rest at runtime.
__ mov_metadata(recv, known_klass->constant_encoding());
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == nullptr) {
__ mov_metadata(rscratch1, known_klass->constant_encoding());
Address recv_addr =
__ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
LogBytesPerWord);
__ str(rscratch1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);

View File

@@ -50,8 +50,9 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv);
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);

View File

@@ -240,14 +240,15 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
// r2
// r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
profile_typecheck(r2, Rsub_klass); // blows r2
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@@ -990,6 +991,7 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1007,7 +1009,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
profile_receiver_type(receiver, mdp, 0);
record_klass_in_profile(receiver, mdp, reg2);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@@ -1016,6 +1018,131 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2, int start_row,
Label& done) {
if (TypeProfileWidth == 0) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} else {
record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
// Test this row for both the item and for null.
// Take any of three different outcomes:
// 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
for (int row = start_row; row <= last_row; row++) {
Label next_test;
bool test_for_null_also = (row == start_row);
// See if the item is item[n].
int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test);
// (Reg2 now contains the item from the CallData.)
// The item is item[n]. Increment count[n].
int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
b(done);
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on item[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
cbz(reg2, found_null);
// Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(done);
bind(found_null);
break;
}
// Since null is rare, make it be the branch-taken case.
cbz(reg2, found_null);
// Put all the "Case 3" tests here.
record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn);
// Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot.
bind(found_null);
}
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(item_count_offset_fn(start_row));
mov(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) {
b(done);
}
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp, Register reg2) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
bind (done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@@ -1073,7 +1200,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1086,7 +1213,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass)
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
profile_receiver_type(klass, mdp, 0);
record_klass_in_profile(klass, mdp, reg2);
}
update_mdp_by_constant(mdp, mdp_delta);

View File

@@ -273,6 +273,15 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done);
void record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@@ -286,10 +295,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,

View File

@@ -2118,161 +2118,6 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
// Handle the receiver type profile update given the "recv" klass.
//
// Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in RD, updates
// the polymorphic counter.
//
// This code is expected to run in either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
//
void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
assert_different_registers(recv, mdp, rscratch1, rscratch2);
int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
int poly_count_offset = in_bytes(CounterData::count_offset());
int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
// Adjust for MDP offsets.
base_receiver_offset += mdp_offset;
end_receiver_offset += mdp_offset;
poly_count_offset += mdp_offset;
#ifdef ASSERT
// We are about to walk the MDO slots without asking for offsets.
// Check that our math hits all the right spots.
for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
int offset = base_receiver_offset + receiver_step*c;
int count_offset = offset + receiver_to_count_step;
assert(offset == real_recv_offset, "receiver slot math");
assert(count_offset == real_count_offset, "receiver count math");
}
int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
assert(poly_count_offset == real_poly_count_offset, "poly counter math");
#endif
// Corner case: no profile table. Increment poly counter and exit.
if (ReceiverTypeData::row_limit() == 0) {
increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
return;
}
Register offset = rscratch2;
Label L_loop_search_receiver, L_loop_search_empty;
Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
// The code here recognizes three major cases:
// A. Fastest: receiver found in the table
// B. Fast: no receiver in the table, and the table is full
// C. Slow: no receiver in the table, free slots in the table
//
// The case A performance is most important, as perfectly-behaved code would end up
// there, especially with larger TypeProfileWidth. The case B performance is
// important as well, this is where bulk of code would land for normally megamorphic
// cases. The case C performance is not essential, its job is to deal with installation
// races, we optimize for code density instead. Case C needs to make sure that receiver
// rows are only claimed once. This makes sure we never overwrite a row for another
// receiver and never duplicate the receivers in the list, making profile type-accurate.
//
// It is very tempting to handle these cases in a single loop, and claim the first slot
// without checking the rest of the table. But, profiling code should tolerate free slots
// in the table, as class unloading can clear them. After such cleanup, the receiver
// we need might be _after_ the free slot. Therefore, we need to let at least a full
// scan complete before trying to install new slots. Splitting the code into several tight
// loops also helpfully optimizes for cases A and B.
//
// This code is effectively:
//
// restart:
// // Fastest: receiver is already installed
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == recv) goto found_recv(i);
// }
//
// // Fast: no receiver, but profile is full
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == null) goto found_null(i);
// }
// goto polymorphic
//
// // Slow: try to install receiver
// found_null(i):
// CAS(&receiver(i), null, recv);
// goto restart
//
// polymorphic:
// count++;
// return
//
// found_recv(i):
// *receiver_count(i)++
//
bind(L_restart);
// Fastest: receiver is already installed
mov(offset, base_receiver_offset);
bind(L_loop_search_receiver);
ldr(rscratch1, Address(mdp, offset));
cmp(rscratch1, recv);
br(Assembler::EQ, L_found_recv);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_receiver);
// Fast: no receiver, but profile is full
mov(offset, base_receiver_offset);
bind(L_loop_search_empty);
ldr(rscratch1, Address(mdp, offset));
cbz(rscratch1, L_found_empty);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_empty);
b(L_polymorphic);
// Slow: try to install receiver
bind(L_found_empty);
// Atomically swing receiver slot: null -> recv.
//
// The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
// is used to hold the destination address. This is safe because the
// offset is no longer needed after the address is computed.
lea(rscratch2, Address(mdp, offset));
cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
/*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
// CAS success means the slot now has the receiver we want. CAS failure means
// something had claimed the slot concurrently: it can be the same receiver we want,
// or something else. Since this is a slow path, we can optimize for code density,
// and just restart the search from the beginning.
b(L_restart);
// Counter updates:
// Increment polymorphic counter instead of receiver slot.
bind(L_polymorphic);
mov(offset, poly_count_offset);
b(L_count_update);
// Found a receiver, convert its slot offset to corresponding count offset.
bind(L_found_recv);
add(offset, offset, receiver_to_count_step);
bind(L_count_update);
increment(Address(mdp, offset), DataLayout::counter_increment);
}
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
@@ -5761,11 +5606,12 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
// Strictly speaking the card table base isn't an address at all, and it might
// Strictly speaking the byte_map_base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
mov(reg, (uint64_t)ctbs->card_table_base_const());
mov(reg, (uint64_t)byte_map_base);
}
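For context, the materialised constant feeds the usual card-marking arithmetic; a sketch of that arithmetic in Java's 64-bit integer math, assuming HotSpot's default 512-byte cards (`card_shift` of 9). The base is pre-biased by the heap start, which is why it need not be a valid address:
```
// card entry address = byte_map_base + (heap_address >>> card_shift)
static long cardAddressFor(long byteMapBase, long heapAddress) {
    final int CARD_SHIFT = 9; // 512-byte cards, the HotSpot default
    return byteMapBase + (heapAddress >>> CARD_SHIFT);
}
```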
void MacroAssembler::build_frame(int framesize) {
@@ -5936,9 +5782,6 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -6005,9 +5848,6 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);

View File

@@ -1122,8 +1122,6 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,20 +722,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
// Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
{ // Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
}
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
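The barrier enforces the JVMS §5.5 rule that a class must be initialized before any of its static methods executes; a small Java example of the observable behavior the compiled c2i entry must preserve (illustrative only):
```
class Holder {
    static { System.out.println("Holder.<clinit> runs first"); }
    static void m() { System.out.println("then Holder.m"); }
}

public class ClinitOrder {
    public static void main(String[] args) {
        // The very first invokestatic must trigger Holder.<clinit>; compiled
        // entries guard this with the class-initialization barrier above,
        // bailing to handle_wrong_method while the class is still initializing.
        Holder.m();
    }
}
```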
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1506,8 +1508,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2290,8 +2290,7 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2341,8 +2340,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
@@ -3370,7 +3369,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
__ profile_virtual_call(r0, rlocals);
__ profile_virtual_call(r0, rlocals, r3);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@@ -3500,7 +3499,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
__ profile_virtual_call(r3, r13);
__ profile_virtual_call(r3, r13, r19);
// Get declaring interface class from method, and itable index

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {

View File

@@ -67,7 +67,9 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
Label L_cardtable_loop, L_done;
@@ -81,7 +83,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address)ctbs->card_table_base_const());
__ mov_address(tmp, (address) ct->byte_map_base());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@@ -120,7 +122,8 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
// Load card table base address.
@@ -137,7 +140,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
__ mov_address(card_table_base, (address)ctbs->card_table_base_const());
__ mov_address(card_table_base, (address)ct->byte_map_base());
}
// The 2nd part of the store check.
@@ -167,8 +170,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
if (pc == nullptr) {
if(pc == 0) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
if (pc == nullptr) {
if (pc == 0) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
void set_data(intptr_t x, address pc = nullptr);
void set_data(intptr_t x, address pc = 0);
bool is_pc_relative() {
return !is_movw();
}

View File

@@ -103,7 +103,8 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@@ -116,7 +117,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
__ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@@ -139,8 +140,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,

View File

@@ -771,6 +771,9 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;

View File

@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
add(R11_scratch1, R11_scratch1, R12_scratch2);
add(R11_scratch1, R12_scratch2, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif

View File

@@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
ble(CR0, done);
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
blt(CR0, done); // if (SP < _cont_fastpath) goto done;
ble(CR0, done);
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1237,24 +1237,26 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
// Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static
{ // Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static
}
Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2208,8 +2210,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2199,8 +2199,7 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
const Register method = Rscratch;
const Register klass = Rscratch;
@@ -2245,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.

View File

@@ -5110,8 +5110,9 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
mv(reg, (uint64_t)ctbs->card_table_base_const());
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
mv(reg, (uint64_t)byte_map_base);
}
void MacroAssembler::build_frame(int framesize) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not ovlerlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
return UseRVV && size > 0;
return UseRVV;
}
// ---------------------------------------------------------------------------
@@ -637,20 +637,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
// Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static
{ // Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static
}
__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1441,8 +1443,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -2192,8 +2192,7 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2244,8 +2243,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);

View File

@@ -83,7 +83,8 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@@ -104,7 +105,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@@ -178,12 +179,13 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
__ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ load_absolute_address(tmp, (address)ct->byte_map_base());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1567,8 +1567,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@@ -2379,22 +2378,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
// Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static
{ // Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static
}
Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
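// Sketch of the guard implemented by the barrier above (illustrative; the
// helper names are hypothetical, not JDK source):
//   if (is_static(method) && !holder_initialized_for(method, current_thread)) {
//     jump to SharedRuntime::get_handle_wrong_method_stub();  // slow path
//   }
//   // fast path: fall through into the c2i adapter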


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2377,8 +2377,7 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@ -2428,8 +2427,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
* Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1330,12 +1330,10 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
// When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
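// Worked example with illustrative values: for n = 40 and k = 12, the Latin-1
// shift is x = k + 31 - n = 12 + 31 - 40 = 3; in the UTF-16 (isU) case the
// constant 31 becomes 30.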
if (sizeKnown) {
__ movl(temp2, (isU ? 30 : 31) + size);
__ movl(temp2, 31 + size);
} else {
__ movl(temp2, isU ? 30 : 31);
__ movl(temp2, 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);


@ -95,7 +95,11 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
BarrierSet *bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
intptr_t disp = (intptr_t) ct->byte_map_base();
SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
Label L_loop, L_done;
const Register end = count;
@ -111,7 +115,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
__ mov64(tmp, disp);
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@ -124,7 +128,10 @@ __ BIND(L_done);
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
__ shrptr(obj, CardTable::card_shift());
@ -135,7 +142,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1043,24 +1043,26 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register method = rbx;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
Register method = rbx;
// Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static
{ // Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static
}
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1902,8 +1904,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@ -3601,3 +3602,4 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,39 +64,6 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
// 0 - 63
0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
45, 46, 46, 47
};
static address kyberAvx512_12To16DupAddr() {
return (address) kyberAvx512_12To16Dup;
}
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
// 0 - 31
0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
4, 0, 4, 0, 4, 0, 4
};
static address kyberAvx512_12To16ShiftAddr() {
return (address) kyberAvx512_12To16Shift;
}
ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
// 0 - 7
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
};
static address kyberAvx512_12To16AndAddr() {
return (address) kyberAvx512_12To16And;
}
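// Scalar view of the unpacking these tables drive (illustrative sketch, not
// part of this change): two 12-bit coefficients are packed into three bytes
// b0, b1, b2. The byte permute (dup table) places (b1:b0) or (b2:b1) in each
// 16-bit lane, the alternating shift of 0 or 4 aligns the coefficient, and
// the 0x0FFF mask keeps its low 12 bits:
//   uint16_t c0 = (uint16_t)((b1 << 8) | b0)        & 0x0FFF;
//   uint16_t c1 = (uint16_t)(((b2 << 8) | b1) >> 4) & 0x0FFF;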
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@ -855,65 +822,10 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
Label Loop, VBMILoop;
Label Loop;
__ addptr(condensed, condensedOffs);
if (VM_Version::supports_avx512_vbmi()) {
// mask load for the first 48 bytes of each vector
__ mov64(rax, 0x0000FFFFFFFFFFFF);
__ kmovql(k1, rax);
__ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
__ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
__ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
__ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
__ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
__ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
__ align(OptoLoopAlignment);
__ BIND(VBMILoop);
__ evmovdqub(xmm0, k1, Address(condensed, 0), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm1, k1, Address(condensed, 48), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm2, k1, Address(condensed, 96), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm3, k1, Address(condensed, 144), false,
Assembler::AVX_512bit);
__ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
__ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
__ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
__ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
__ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
__ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
__ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
__ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
__ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
store4regs(parsed, 0, xmm0_3, _masm);
__ addptr(condensed, 192);
__ addptr(parsed, 256);
__ subl(parsedLength, 128);
__ jcc(Assembler::greater, VBMILoop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov64(rax, 0); // return 0
__ ret(0);
return start;
}
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2216,8 +2216,7 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
const Register method = temp;
const Register klass = temp;
@ -2265,8 +2264,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -921,9 +921,8 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
(supports_hybrid() ||
_model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
_model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
(_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
_model == 0xCC || _model == 0xDD)) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -258,18 +258,10 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@ -281,10 +273,6 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@ -294,10 +282,6 @@ bool os::Machine::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@ -310,10 +294,6 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return Aix::physical_memory();
}
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@ -2284,10 +2264,6 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;


@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
return nullptr;
return 0;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,18 +137,10 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
// Available here means free. Note that this number is of little use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of otherwise unused memory for caches, and willingly return it when needed.
@ -189,10 +181,6 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@ -207,10 +195,6 @@ bool os::Machine::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@ -228,10 +212,6 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return Bsd::physical_memory();
}
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@ -628,7 +608,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
return nullptr;
return 0;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@ -1420,7 +1400,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
return 1;
}
}
@ -2209,10 +2189,6 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
return _processor_count;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -631,20 +631,22 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
bool CgroupSubsystem::active_processor_count(double& value) {
bool CgroupSubsystem::active_processor_count(int& value) {
int cpu_count;
int result = -1;
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
CachedMetric<double>* cpu_limit = contrl->metrics_cache();
CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
CachedMetric* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
value = cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
value = (int)cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
return true;
}
int cpu_count = os::Linux::active_processor_count();
double result = -1;
cpu_count = os::Linux::active_processor_count();
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@ -669,8 +671,8 @@ bool CgroupSubsystem::active_processor_count(double& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
CachedMetric* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,21 +181,20 @@ class CgroupController: public CHeapObj<mtInternal> {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
template <typename MetricType>
class CachedMetric : public CHeapObj<mtInternal>{
private:
volatile MetricType _metric;
volatile physical_memory_size_type _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
_metric = static_cast<MetricType>(value_unlimited);
_metric = value_unlimited;
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
MetricType value() { return _metric; }
void set_value(MetricType value, jlong timeout) {
physical_memory_size_type value() { return _metric; }
void set_value(physical_memory_size_type value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@ -206,19 +205,19 @@ class CachedMetric : public CHeapObj<mtInternal>{
}
};
template <class T, typename MetricType>
template <class T>
class CachingCgroupController : public CHeapObj<mtInternal> {
private:
T* _controller;
CachedMetric<MetricType>* _metrics_cache;
CachedMetric* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
_metrics_cache = new CachedMetric<MetricType>();
_metrics_cache = new CachedMetric();
}
CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
CachedMetric* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
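// Illustrative use of the cache (sketch only; mirrors the pattern in
// CgroupSubsystem::active_processor_count above):
//   CachedMetric* m = cpu_controller()->metrics_cache();
//   if (!m->should_check_metric()) {
//     return m->value();                  // fast path within the grace time
//   }
//   // ... recompute the metric ...
//   m->set_value(fresh_value, timeout);   // re-arm the check counter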
@ -278,7 +277,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
bool active_processor_count(double& value);
bool active_processor_count(int& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@ -287,8 +286,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);


@ -1,6 +1,5 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,8 +25,9 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@ -37,15 +37,20 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
double result = upper_bound;
int result = upper_bound;
if (quota > 0 && period > 0) { // Use quotas
double cpu_quota = static_cast<double>(quota) / period;
log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
result = MIN2(result, cpu_quota);
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
}
log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
// Use quotas
if (quota_count != 0) {
limit_count = quota_count;
}
result = MIN2(upper_bound, limit_count);
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
value = result;
return true;
}
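// Worked example with illustrative values: quota = 150000 usec and
// period = 100000 usec give quota_count = ceilf(150000.0f / 100000.0f) = 2,
// so an 8-CPU host reports MIN2(8, 2) = 2 active processors.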
@ -68,11 +73,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
double cpu_limit_val = -1;
int cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@ -167,7 +172,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {


@ -1,6 +1,5 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +31,7 @@
class CgroupUtil: AllStatic {
public:
static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@ -43,7 +42,9 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
static int get_updated_cpu_limit(CgroupCpuController* c,
int lowest,
int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
}
bool CgroupV1Subsystem::is_containerized() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;


@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
_cpuacct = cpuacct;
}


@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
double host_cpus = os::Linux::active_processor_count();
double cpus = host_cpus;
int host_cpus = os::Linux::active_processor_count();
int cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@ -127,7 +127,8 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@ -178,7 +179,8 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
" container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
mem_swap_buf, mem_limit_buf, host_free_swap);
}
return false;
}
@ -250,7 +252,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
bool OSContainer::active_processor_count(double& value) {
bool OSContainer::active_processor_count(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@ -289,13 +291,11 @@ template<typename T> struct metric_fmt;
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
template void OSContainer::print_container_metric<double>(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
template <typename T>
@ -304,13 +304,12 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
const char* unit_prefix = unit[0] != '\0' ? " " : "";
char line[128] = {};
os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
st->print_cr("%s", line);
st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
if (unit[0] != '\0') {
st->print_cr(" %s", unit);
} else {
st->print_cr("");
}
}
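// Example with illustrative values: for metrics = "cpu_quota", value = 150000
// and unit = "usec", both variants print one right-aligned line such as
//   cpu_quota:                 150000 usec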
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,8 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@ -83,7 +84,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
static bool active_processor_count(double& value);
static bool active_processor_count(int& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();


@ -211,58 +211,15 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
bool os::is_containerized() {
return OSContainer::is_containerized();
}
bool os::Container::memory_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::used_memory(physical_memory_size_type& value) {
return OSContainer::memory_usage_in_bytes(value);
}
bool os::available_memory(physical_memory_size_type& value) {
if (is_containerized() && Container::available_memory(value)) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
return Machine::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
bool os::Container::available_memory(physical_memory_size_type& value) {
return OSContainer::available_memory_in_bytes(value);
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@ -294,15 +251,11 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
if (is_containerized() && Container::available_memory(value)) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
return Machine::free_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@ -321,28 +274,19 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
if (is_containerized() && Container::total_swap_space(value)) {
return true;
} // fallback to the host swap space if the container value fails
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
bool os::Container::total_swap_space(physical_memory_size_type& value) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
if (OSContainer::is_containerized()) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
}
return false;
} // fallback to the host swap space if the container returned unlimited
return Linux::host_swap(value);
}
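// Worked example with illustrative values: memory_limit = 2 GB and
// mem_swap_limit = 3 GB yield a container swap space of 3 GB - 2 GB = 1 GB;
// if either metric is value_unlimited, the code falls back to
// Linux::host_swap().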
static bool host_free_swap_f(physical_memory_size_type& value) {
@ -365,45 +309,32 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (is_containerized()) {
if (Container::free_swap_space(value)) {
if (OSContainer::is_containerized()) {
if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
value = host_free_swap_val;
return true;
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
return host_free_swap_f(value);
}
bool os::Container::free_swap_space(physical_memory_size_type& value) {
return OSContainer::available_swap_in_bytes(value);
}
physical_memory_size_type os::physical_memory() {
if (is_containerized()) {
if (OSContainer::is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
physical_memory_size_type phys_mem = Machine::physical_memory();
physical_memory_size_type phys_mem = Linux::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
physical_memory_size_type os::Machine::physical_memory() {
return Linux::physical_memory();
}
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
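// Sketch of the VmRSS fallback (assumption, not JDK source):
//   FILE* f = ::fopen("/proc/self/status", "r");
//   char line[256];
//   size_t kb = 0;
//   while (f != nullptr && ::fgets(line, sizeof(line), f) != nullptr) {
//     if (::sscanf(line, "VmRSS: %zu kB", &kb) == 1) { rss = kb * 1024; break; }
//   }
//   if (f != nullptr) ::fclose(f);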
@ -2508,21 +2439,20 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
double cpus = -1;
bool supported = OSContainer::active_processor_count(cpus);
int i = -1;
bool supported = OSContainer::active_processor_count(i);
if (supported) {
assert(cpus > 0, "must be");
assert(i > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
OSContainer::print_container_metric(st, "active_processor_count", cpus);
OSContainer::print_container_metric(st, "active_processor_count", i);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@ -4807,26 +4737,15 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
if (is_containerized()) {
double cpu_quota;
if (Container::processor_count(cpu_quota)) {
int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
assert(active_cpus <= Machine::active_processor_count(), "must be");
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
return active_cpus;
}
int active_cpus = -1;
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {
active_cpus = os::Linux::active_processor_count();
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
return os::Linux::active_processor_count();
}
bool os::Container::processor_count(double& value) {
return OSContainer::active_processor_count(value);
return active_cpus;
}
static bool should_warn_invalid_processor_id() {
@ -4963,14 +4882,9 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;
int fd = ::open(path, oflag, mode);
// No further checking is needed if open() returned an error or
// access mode is not read only.
if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
return fd;
}
if (fd == -1) return -1;
// If the open succeeded and the access mode is read only, the file might be
// a directory, which the JVM doesn't allow to be read.
// If the open succeeded, the file might still be a directory
{
struct stat buf;
int ret = ::fstat(fd, &buf);


@ -112,10 +112,6 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
} else {
if (!successful_write) {
remove(destfile);
}
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@ -953,7 +949,6 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
remove(filename);
break;
}
}


@ -839,18 +839,10 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@ -866,11 +858,7 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -884,10 +872,6 @@ bool os::Machine::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -904,10 +888,6 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return win32::physical_memory();
}
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@ -931,10 +911,6 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -412,8 +412,12 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}
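// Note: mtfsfi 6,0 clears FPSCR field 6 (bits 24-27: VE, OE, UE, ZE), i.e.
// the invalid-operation, overflow, underflow and zero-divide exception
// enable bits.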
////////////////////////////////////////////////////////////////////////////////


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,21 +29,29 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}
inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}
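// Note: dcbt ("data cache block touch") hints an upcoming load and dcbtst
// ("data cache block touch for store") an upcoming store; both are pure
// performance hints that the processor may ignore.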
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP


@ -43,8 +43,7 @@ void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
_card_table_base = (address)ctbs->card_table_base_const();
_card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
} else {
_card_table_base = nullptr;
}


@ -1,34 +0,0 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/growableArray.hpp"
void AOTGrowableArrayHelper::deallocate(void* mem) {
if (!AOTMetaspace::in_aot_cache(mem)) {
GrowableArrayCHeapAllocator::deallocate(mem);
}
}


@ -1,76 +0,0 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
#define SHARE_AOT_AOTGROWABLEARRAY_HPP
#include <memory/metaspaceClosureType.hpp>
#include <utilities/growableArray.hpp>
class AOTGrowableArrayHelper {
public:
static void deallocate(void* mem);
};
// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
// MetaspaceClosure. This type should be used for growable arrays that need to be
// stored in the AOT cache. See ModuleEntry::_reads for an example.
template <typename E>
class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
friend class VMStructs;
friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;
static E* allocate(int max, MemTag mem_tag) {
return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
}
E* allocate() {
return allocate(this->_capacity, mtClass);
}
void deallocate(E* mem) {
#if INCLUDE_CDS
AOTGrowableArrayHelper::deallocate(mem);
#else
GrowableArrayCHeapAllocator::deallocate(mem);
#endif
}
public:
AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
GrowableArrayWithAllocator<E, AOTGrowableArray>(
allocate(initial_capacity, mem_tag),
initial_capacity) {}
AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
static bool is_read_only_by_default() { return false; }
};
#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
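// Illustrative usage (sketch with hypothetical values):
//   AOTGrowableArray<Symbol*> names(4, mtClassShared);
//   names.append(sym);   // grows through GrowableArrayCHeapAllocator
//   // deallocate() skips backing memory that lives inside the AOT cache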


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,8 +29,6 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@ -143,7 +141,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
info._type = ref->type();
info._type = ref->msotype();
_objs.append(info);
}
@ -216,7 +214,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->type();
info._type = src_info->msotype();
objs.append(info);
}
@ -334,52 +332,43 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
MetaspaceClosureType type = info._type;
const char* type_name = MetaspaceClosure::type_name(type);
MetaspaceObj::Type type = info._type;
const char* type_name = MetaspaceObj::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
case MetaspaceClosureType::ClassType:
case MetaspaceObj::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::ConstantPoolType:
case MetaspaceObj::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::ConstantPoolCacheType:
case MetaspaceObj::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::ConstMethodType:
case MetaspaceObj::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::MethodType:
case MetaspaceObj::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::MethodCountersType:
case MetaspaceObj::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::MethodDataType:
case MetaspaceObj::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::ModuleEntryType:
log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::PackageEntryType:
log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::GrowableArrayType:
log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::SymbolType:
case MetaspaceObj::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::KlassTrainingDataType:
case MetaspaceObj::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::MethodTrainingDataType:
case MetaspaceObj::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::CompileTrainingDataType:
case MetaspaceObj::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@ -432,27 +421,6 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
mod->name_as_C_string());
}
void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
pkg->module()->name_as_C_string(), pkg->name_as_C_string());
}
void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
arr->length(), arr->capacity());
}
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,6 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@ -38,13 +37,9 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
class GrowableArrayBase;
class KlassTrainingData;
class MethodCounters;
class MethodTrainingData;
class ModuleEntry;
class outputStream;
class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@ -67,7 +62,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
MetaspaceClosureType _type;
MetaspaceObj::Type _type;
};
public:
@ -147,9 +142,6 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);

View File

@ -619,7 +619,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;

View File

@ -698,9 +698,6 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_roots(it);
}
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@ -798,10 +795,6 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::remove_unshareable_info();
}
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@ -1142,7 +1135,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}

View File

@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
if (ref->type() == MetaspaceClosureType::ClassType) {
if (ref->msotype() == MetaspaceObj::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
} else if (ref->type() == MetaspaceClosureType::SymbolType) {
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@ -271,6 +271,11 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@ -441,14 +446,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
if (ref->type() == MetaspaceClosureType::MethodType) {
if (ref->msotype() == MetaspaceObj::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
if (ref->type() == MetaspaceClosureType::MethodDataType) {
if (ref->msotype() == MetaspaceObj::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@ -549,16 +554,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
} else if (ref->type() == MetaspaceClosureType::MethodDataType ||
ref->type() == MetaspaceClosureType::MethodCountersType ||
ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
} else if (ref->msotype() == MetaspaceObj::MethodDataType ||
ref->msotype() == MetaspaceObj::MethodCountersType ||
ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
} else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
} else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
if (ref->type() == MetaspaceClosureType::ClassType) {
if (ref->msotype() == MetaspaceObj::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@ -566,12 +571,7 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
return set_to_null;
}
if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
ResourceMark rm;
aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
return set_to_null;
}
}
@ -615,6 +615,15 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = rw_region()->top();
ClassLoaderDataShared::allocate_archived_tables();
alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
}
#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@ -623,6 +632,15 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
char* start = ro_region()->top();
ClassLoaderDataShared::init_archived_tables();
alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
}
#endif
RegeneratedClasses::record_regenerated_objects();
}
@ -640,7 +658,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
if (src_info->type() == MetaspaceClosureType::ClassType) {
if (src_info->msotype() == MetaspaceObj::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@ -656,7 +674,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
} else if (src_info->type() == MetaspaceClosureType::SymbolType) {
} else if (src_info->msotype() == MetaspaceObj::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@ -666,7 +684,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@ -681,7 +699,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@ -691,7 +709,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
_alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
_alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@ -974,15 +992,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
info.type() == MetaspaceClosureType::MethodTrainingDataType ||
info.type() == MetaspaceClosureType::CompileTrainingDataType) {
if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
info.msotype() == MetaspaceObj::MethodTrainingDataType ||
info.msotype() == MetaspaceObj::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
} else if (info.type() == MetaspaceClosureType::MethodDataType) {
} else if (info.msotype() == MetaspaceObj::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
} else if (info.type() == MetaspaceClosureType::MethodCountersType) {
} else if (info.msotype() == MetaspaceObj::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
MetaspaceClosureType _type;
MetaspaceObj::Type _msotype;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object inside the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* regenerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(regenerated_obj_info->_follow_mode),
_size_in_bytes(0), _type(regenerated_obj_info->_type),
_size_in_bytes(0), _msotype(regenerated_obj_info->_msotype),
_source_addr(src), _buffered_addr(regenerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
MetaspaceClosureType type() const { return _type; }
MetaspaceObj::Type msotype() const { return _msotype; }
FollowMode follow_mode() const { return _follow_mode; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,14 +22,12 @@
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@ -55,19 +53,6 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
#ifndef PRODUCT
// AOTGrowableArray has a vtable only when in non-product builds (due to
// the virtual printing functions in AnyObj).
using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
f(GrowableArray_ModuleEntry_ptr) \
#endif
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@ -83,8 +68,7 @@ using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
f(CompileTrainingData) \
NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
f(CompileTrainingData)
class CppVtableInfo {
intptr_t _vtable_size;
@ -102,7 +86,7 @@ public:
}
};
static inline intptr_t* vtable_of(const void* m) {
static inline intptr_t* vtable_of(const Metadata* m) {
return *((intptr_t**)m);
}
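The one-word `vtable_of` read above is the heart of the cloning scheme: on Itanium-ABI compilers, the first word of a polymorphic object is its vtable pointer. A standalone sketch of the idea, hypothetical and relying on that ABI layout rather than anything guaranteed by standard C++:

```cpp
// Hypothetical sketch of vtable-pointer capture (not JDK code). On common
// Itanium-ABI compilers the first word of a polymorphic object holds the
// vtable pointer; the archive captures that word from a freshly
// constructed dummy and re-points archived objects of the same type.
#include <cassert>
#include <cstdint>

struct Meta   { virtual ~Meta() = default; virtual int kind() const { return 0; } };
struct Method : Meta { int kind() const override { return 1; } };

static intptr_t* vtable_of(const void* obj) { return *(intptr_t**)obj; }

int main() {
  Method dummy;    // like the 'T tmp' in CppVtableCloner::initialize
  Method archived; // stands in for an object copied out of the archive
  // Same dynamic type => same vtable pointer, so re-pointing an archived
  // object only needs the address captured from the dummy.
  assert(vtable_of(&dummy) == vtable_of(&archived));
  return 0;
}
```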
@ -132,7 +116,6 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
ResourceMark rm;
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@ -285,7 +268,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@ -293,23 +276,19 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
switch (type) {
case MetaspaceClosureType::SymbolType:
case MetaspaceClosureType::TypeArrayU1Type:
case MetaspaceClosureType::TypeArrayU2Type:
case MetaspaceClosureType::TypeArrayU4Type:
case MetaspaceClosureType::TypeArrayU8Type:
case MetaspaceClosureType::TypeArrayOtherType:
case MetaspaceClosureType::CArrayType:
case MetaspaceClosureType::ConstMethodType:
case MetaspaceClosureType::ConstantPoolCacheType:
case MetaspaceClosureType::AnnotationsType:
case MetaspaceClosureType::ModuleEntryType:
case MetaspaceClosureType::PackageEntryType:
case MetaspaceClosureType::RecordComponentType:
case MetaspaceClosureType::AdapterHandlerEntryType:
case MetaspaceClosureType::AdapterFingerPrintType:
PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
switch (msotype) {
case MetaspaceObj::SymbolType:
case MetaspaceObj::TypeArrayU1Type:
case MetaspaceObj::TypeArrayU2Type:
case MetaspaceObj::TypeArrayU4Type:
case MetaspaceObj::TypeArrayU8Type:
case MetaspaceObj::TypeArrayOtherType:
case MetaspaceObj::ConstMethodType:
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceObj::AnnotationsType:
case MetaspaceObj::RecordComponentType:
case MetaspaceObj::AdapterHandlerEntryType:
case MetaspaceObj::AdapterFingerPrintType:
// These have no vtables.
break;
default:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@ -41,7 +40,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,34 +27,32 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
#define DUMPED_OBJ_TYPES_DO(f) \
METASPACE_CLOSURE_TYPES_DO(f) \
// Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
METASPACE_OBJ_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
f(ModulesNatives) \
f(CppVTables) \
f(Other)
#define DUMPED_TYPE_DECLARE(name) name ## Type,
#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@ -103,12 +101,16 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
void record(MetaspaceClosureType type, int byte_size, bool read_only) {
int t = (int)type;
assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
_counts[which][t] ++;
_bytes [which][t] += byte_size;
_counts[which][type] ++;
_bytes [which][type] += byte_size;
}
void record_modules(int byte_size, bool read_only) {
int which = (read_only) ? RO : RW;
_bytes [which][ModulesNativesType] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -948,6 +948,10 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
if (CDSConfig::is_dumping_full_module_graph()) {
Modules::verify_archived_modules();
}
}
//

View File

@ -216,10 +216,6 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->trust_final_fields()) {
// Explicit opt-in from system classes
return true;
}
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@ -234,6 +230,14 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
return true;
}
return TrustFinalNonStaticFields;
}

View File

@ -65,7 +65,6 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
_trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily

View File

@ -59,7 +59,6 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@ -208,10 +207,6 @@ public:
return _is_record;
}
bool trust_final_fields() const {
return _trust_final_fields;
}
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);

View File

@ -42,7 +42,10 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
CardTable::CardValue* ci_card_table_address_const() {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
return ctbs->card_table_base_const();
CardTable::CardValue* ci_card_table_address() {
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");
return ct->byte_map_base();
}
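The returned `byte_map_base` gets baked into compiled code as an immediate, which is why it must be constant. A sketch of the post-write barrier it feeds, using hypothetical constants (HotSpot's defaults are 512-byte cards with 0 meaning dirty):

```cpp
// Hypothetical sketch of a card-table post-write barrier (not JDK code).
// Each 2^9 = 512-byte window of the heap maps to one byte of the table;
// a reference store dirties the card covering the written field so the
// GC can later scan just the dirty windows.
#include <cstdint>

static const int     kCardShift = 9; // 512-byte cards
static const uint8_t kDirty     = 0; // HotSpot uses 0 for "dirty"

inline void post_write_barrier(uint8_t* byte_map_base, void* field_addr) {
  // byte_map_base is pre-biased so the heap address indexes it directly.
  byte_map_base[(uintptr_t)field_addr >> kCardShift] = kDirty;
}
```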

View File

@ -51,9 +51,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
CardTable::CardValue* ci_card_table_address_const();
CardTable::CardValue* ci_card_table_address();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address_const());
return reinterpret_cast<T>(ci_card_table_address());
}
#endif // SHARE_CI_CIUTILITIES_HPP

View File

@ -943,7 +943,6 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
_jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@ -1879,11 +1878,6 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
if (_location != _in_class) break; // only allow for classes
if (!privileged) break; // only allow in privileged code
return _jdk_internal_vm_annotation_TrustFinalFields;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@ -1998,9 +1992,6 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
ik->set_trust_final_fields(true);
}
}
#define MAX_ARGS_SIZE 255

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,6 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@ -57,9 +56,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
void iterate_roots(MetaspaceClosure* closure);
void build_tables(ClassLoaderData* loader_data, TRAPS);
void remove_unshareable_info();
void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
void allocate(ClassLoaderData* loader_data);
void init_archived_entries(ClassLoaderData* loader_data);
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@ -81,14 +80,17 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
it->push(&_packages);
it->push(&_modules);
it->push(&_unnamed_module);
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->iterate_symbols(closure);
loader_data->modules() ->iterate_symbols(closure);
loader_data->unnamed_module()->iterate_symbols(closure);
}
}
void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@ -96,28 +98,19 @@ void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS)
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
_packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
_modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
_unnamed_module = loader_data->unnamed_module();
_packages = loader_data->packages()->allocate_archived_entries();
_modules = loader_data->modules() ->allocate_archived_entries();
_unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
}
}
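The ASLR comment above is the reason the tables round-trip through arrays at all: a hashtable keyed on pointer values is meaningless once the Symbols move. A stripped-down sketch of the freeze-to-array / re-hash-at-runtime shape, with hypothetical types standing in for the HotSpot tables:

```cpp
// Hypothetical sketch (not JDK code): archive a table as a flat array,
// because hash codes derived from addresses are invalid after ASLR moves
// the keys; the runtime rebuilds the hashtable from the array.
#include <string>
#include <unordered_map>
#include <vector>

struct Pkg { std::string name; };

// Dump time: flatten to an array (ordering made deterministic elsewhere).
std::vector<Pkg> freeze_table(const std::unordered_map<std::string, Pkg>& t) {
  std::vector<Pkg> flat;
  for (const auto& kv : t) flat.push_back(kv.second);
  return flat;
}

// Run time: re-hash with the keys as they exist in the current process.
std::unordered_map<std::string, Pkg> thaw_table(const std::vector<Pkg>& flat) {
  std::unordered_map<std::string, Pkg> t;
  for (const auto& p : flat) t.emplace(p.name, p);
  return t;
}
```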
void ArchivedClassLoaderData::remove_unshareable_info() {
if (_packages != nullptr) {
_packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
for (int i = 0; i < _packages->length(); i++) {
_packages->at(i)->remove_unshareable_info();
}
}
if (_modules != nullptr) {
_modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
for (int i = 0; i < _modules->length(); i++) {
_modules->at(i)->remove_unshareable_info();
}
}
if (_unnamed_module != nullptr) {
_unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
_unnamed_module->remove_unshareable_info();
void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->init_archived_entries(_packages);
loader_data->modules() ->init_archived_entries(_modules);
_unnamed_module->init_as_archived_entry();
}
}
@ -160,6 +153,7 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@ -184,6 +178,7 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
#endif
}
static ClassLoaderData* null_class_loader_data() {
@ -215,27 +210,28 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
void ClassLoaderDataShared::build_tables(TRAPS) {
void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
_archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
_archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
_archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
_archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
_archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
}
void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
void ClassLoaderDataShared::allocate_archived_tables() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.iterate_roots(it);
_archived_platform_loader_data.iterate_roots(it);
_archived_system_loader_data.iterate_roots(it);
_archived_boot_loader_data.allocate (null_class_loader_data());
_archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
_archived_system_loader_data.allocate (java_system_loader_data_or_null());
}
void ClassLoaderDataShared::remove_unshareable_info() {
void ClassLoaderDataShared::init_archived_tables() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.remove_unshareable_info();
_archived_platform_loader_data.remove_unshareable_info();
_archived_system_loader_data.remove_unshareable_info();
_archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_archived_boot_loader_data.init_archived_entries (null_class_loader_data());
_archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
_archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
_archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@ -275,6 +271,7 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
static void allocate_archived_tables();
static void iterate_symbols(MetaspaceClosure* closure);
static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,6 @@
*/
#include "cds/aotClassLocation.hpp"
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@ -38,7 +37,6 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@ -46,6 +44,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@ -168,7 +167,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}
@ -275,7 +274,8 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
_is_patched(false) {
_is_patched(false)
DEBUG_ONLY(COMMA _reads_is_archived(false)) {
// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@ -394,6 +394,7 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@ -401,51 +402,147 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}
void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_reads);
it->push(&_version);
it->push(&_location);
}
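For context, `metaspace_pointers_do` follows the usual MetaspaceClosure shape: each archivable type enumerates the slots that hold embedded metadata pointers, and a single visitor decides what to do with each slot without knowing the type's layout. A minimal sketch of that shape, with hypothetical types:

```cpp
// Minimal hypothetical sketch of the pointer-visiting closure pattern
// behind metaspace_pointers_do (not JDK code): the object names its
// pointer slots; the closure can then relocate, copy, or null them.
#include <vector>

struct Closure {
  virtual void push(void** slot) = 0;
  virtual ~Closure() = default;
};

struct Entry {
  void* name; void* version; void* location;
  void pointers_do(Closure* it) {
    it->push(&name);
    it->push(&version);
    it->push(&location);
  }
};

struct CollectSlots : Closure {
  std::vector<void**> slots; // gathered for a later relocation pass
  void push(void** slot) override { slots.push_back(slot); }
};
```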
#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const ModuleEntry*,
ModuleEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedModuleEntries;
static ArchivedModuleEntries* _archive_modules_entries = nullptr;
#ifndef PRODUCT
static int _num_archived_module_entries = 0;
static int _num_inited_module_entries = 0;
#endif
bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}
void ModuleEntry::remove_unshareable_info() {
_archived_module_index = HeapShared::append_root(module_oop());
ModuleEntry* ModuleEntry::allocate_archived_entry() const {
precond(should_be_archived());
precond(CDSConfig::is_dumping_full_module_graph());
ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
if (_reads != nullptr) {
_reads->set_in_aot_cache();
archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
if (_archive_modules_entries == nullptr) {
_archive_modules_entries = new (mtClass)ArchivedModuleEntries();
}
assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
_archive_modules_entries->put(this, archived_entry);
DEBUG_ONLY(_num_archived_module_entries++);
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
archived_entry->_shared_pd = null_handle;
} else {
assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
}
// Clear handles and restore at run time. Handles cannot be archived.
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
_shared_pd = null_handle;
} else {
assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
OopHandle null_handle;
archived_entry->_module_handle = null_handle;
// For verify_archived_module_entries()
DEBUG_ONLY(_num_inited_module_entries++);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
archived_entry->print(&ls);
}
return archived_entry;
}
bool ModuleEntry::has_been_archived() {
assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
return _archive_modules_entries->contains(this);
}
ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
return *ptr;
}
// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
// Write it out as an Array, and convert it back to GrowableArray at runtime.
Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
Array<ModuleEntry*>* archived_array = nullptr;
int length = (array == nullptr) ? 0 : array->length();
if (length > 0) {
archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
}
}
OopHandle null_handle;
_module_handle = null_handle;
return archived_array;
}
GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
GrowableArray<ModuleEntry*>* array = nullptr;
int length = (archived_array == nullptr) ? 0 : archived_array->length();
if (length > 0) {
array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = archived_array->at(i);
array->append(archived_entry);
}
}
return array;
}
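The pair above is a freeze/thaw round trip: the expandable list is flattened into a fixed-length array for the read-only archive, then rebuilt as an expandable list when loaded. A standalone sketch under simplified assumptions (hypothetical `FixedArray` header, elements assumed no more aligned than the header, as is true for pointers):

```cpp
// Hypothetical sketch (not JDK code) of the freeze/thaw round trip:
// an expandable list becomes a fixed-length array for the archive and
// is rebuilt as an expandable list at runtime.
#include <cstddef>
#include <new>
#include <vector>

template <typename T>
struct FixedArray {                 // stand-in for HotSpot's Array<T>
  size_t length;
  T* data() { return reinterpret_cast<T*>(this + 1); } // elements follow
};

template <typename T>
FixedArray<T>* freeze(const std::vector<T>& v) {
  void* mem = ::operator new(sizeof(FixedArray<T>) + v.size() * sizeof(T));
  auto* a = new (mem) FixedArray<T>{v.size()};
  for (size_t i = 0; i < v.size(); i++) a->data()[i] = v[i];
  return a;
}

template <typename T>
std::vector<T> thaw(FixedArray<T>* a) {
  if (a == nullptr) return {};
  return std::vector<T>(a->data(), a->data() + a->length);
}
```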
void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
closure->push(&_version);
closure->push(&_location);
}
void ModuleEntry::init_as_archived_entry() {
set_archived_reads(write_growable_array(reads()));
_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
_name = ArchiveBuilder::get_buffered_symbol(_name);
ArchivePtrMarker::mark_pointer((address*)&_name);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
if (_version != nullptr) {
_version = ArchiveBuilder::get_buffered_symbol(_version);
}
if (_location != nullptr) {
_location = ArchiveBuilder::get_buffered_symbol(_location);
}
JFR_ONLY(set_trace_id(0);) // re-init at runtime
ArchivePtrMarker::mark_pointer((address*)&_reads);
ArchivePtrMarker::mark_pointer((address*)&_version);
ArchivePtrMarker::mark_pointer((address*)&_location);
}
#ifndef PRODUCT
void ModuleEntry::verify_archived_module_entries() {
assert(_num_archived_module_entries == _num_inited_module_entries,
"%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
_num_archived_module_entries, _num_inited_module_entries);
}
#endif // PRODUCT
void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}
@ -484,28 +581,38 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}
Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
Array<ModuleEntry*>* aot_table =
MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
m->iterate_symbols(closure);
};
_table.iterate_all(syms);
}
Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
m->pack_reads();
aot_table->at_put(n++, m);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
m->print(&ls);
}
archived_modules->at_put(n++, m);
};
_table.iterate_all(grab);
if (n > 1) {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(aot_table->data(), n, compare_module_by_name);
QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
}
for (int i = 0; i < n; i++) {
archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
}
return archived_modules;
}
return aot_table;
void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
for (int i = 0; i < archived_modules->length(); i++) {
ModuleEntry* archived_entry = archived_modules->at(i);
archived_entry->init_as_archived_entry();
}
}
void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,7 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP
#include "cds/aotGrowableArray.hpp"
#include "jni.h"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@ -70,8 +68,11 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
union {
GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
};
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@ -80,6 +81,7 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@ -118,18 +120,22 @@ public:
bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
AOTGrowableArray<ModuleEntry*>* reads() const {
GrowableArray<ModuleEntry*>* reads() const {
assert(!_reads_is_archived, "sanity");
return _reads;
}
void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
void set_reads(GrowableArray<ModuleEntry*>* r) {
_reads = r;
DEBUG_ONLY(_reads_is_archived = false);
}
void pack_reads() {
if (_reads != nullptr) {
_reads->shrink_to_fit();
}
Array<ModuleEntry*>* archived_reads() const {
assert(_reads_is_archived, "sanity");
return _archived_reads;
}
void set_archived_reads(Array<ModuleEntry*>* r) {
_archived_reads = r;
DEBUG_ONLY(_reads_is_archived = true);
}
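The union plus `DEBUG_ONLY` flag above is a classic space-saving tagged slot: one pointer-sized field carries either form, and a debug-build tag guards the accessors. A stripped-down sketch with hypothetical names:

```cpp
// Hypothetical sketch (not the JDK type) of the debug-checked union slot
// used for _reads/_archived_reads: one field, two interpretations, and a
// debug-only tag that catches access through the wrong accessor.
#include <cassert>
#include <vector>

struct Entry;

class ReadsSlot {
  union {
    std::vector<Entry*>* _live;     // mutable runtime form
    const Entry* const*  _archived; // frozen archived form
  };
#ifndef NDEBUG
  bool _is_archived = false;        // tag exists only in debug builds
#endif
public:
  ReadsSlot() : _live(nullptr) {}
  void set_live(std::vector<Entry*>* v) {
    _live = v;
#ifndef NDEBUG
    _is_archived = false;
#endif
  }
  void set_archived(const Entry* const* a) {
    _archived = a;
#ifndef NDEBUG
    _is_archived = true;
#endif
  }
  std::vector<Entry*>* live() const {
    assert(!_is_archived && "slot currently holds the archived form");
    return _live;
  }
};
```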
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
@ -183,13 +189,6 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
static bool is_read_only_by_default() { return false; }
void print(outputStream* st = tty) const;
void verify();
@ -199,11 +198,18 @@ public:
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void remove_unshareable_info();
void iterate_symbols(MetaspaceClosure* closure);
ModuleEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
bool has_been_archived();
static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};
@ -269,7 +275,9 @@ public:
void verify();
#if INCLUDE_CDS_JAVA_HEAP
Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void iterate_symbols(MetaspaceClosure* closure);
Array<ModuleEntry*>* allocate_archived_entries();
void init_archived_entries(Array<ModuleEntry*>* archived_modules);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -505,10 +505,13 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");
precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
if (orig_module_ent->name() == nullptr) {
if (orig_module_ent->name() != nullptr) {
// For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
assert(orig_module_ent->has_been_archived(), "sanity");
} else {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
precond(orig_module_ent->has_been_archived());
if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@ -526,6 +529,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}
void Modules::verify_archived_modules() {
ModuleEntry::verify_archived_module_entries();
}
class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,6 +59,7 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,6 @@
*
*/
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@ -33,13 +31,13 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@ -53,7 +51,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
_qualified_exports(nullptr),
_defined_by_cds_in_class_path(0)
{
// name can't be null -- a class in the default package gets a PackageEntry of nullptr.
// name can't be null
_name->increment_refcount();
JFR_ONLY(INIT_ID(this);)
@ -83,7 +81,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
_qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
_qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
}
// Determine, based on this newly established export to module m,
@ -185,24 +183,12 @@ void PackageEntry::purge_qualified_exports() {
}
void PackageEntry::delete_qualified_exports() {
if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
if (_qualified_exports != nullptr) {
delete _qualified_exports;
}
_qualified_exports = nullptr;
}
void PackageEntry::pack_qualified_exports() {
if (_qualified_exports != nullptr) {
_qualified_exports->shrink_to_fit();
}
}
void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_module);
it->push(&_qualified_exports);
}
PackageEntryTable::PackageEntryTable() { }
PackageEntryTable::~PackageEntryTable() {
@ -226,19 +212,66 @@ PackageEntryTable::~PackageEntryTable() {
}
#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const PackageEntry*,
PackageEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedPackageEntries;
static ArchivedPackageEntries* _archived_packages_entries = nullptr;
bool PackageEntry::should_be_archived() const {
return module()->should_be_archived();
}
void PackageEntry::remove_unshareable_info() {
if (_qualified_exports != nullptr) {
_qualified_exports->set_in_aot_cache();
PackageEntry* PackageEntry::allocate_archived_entry() const {
precond(should_be_archived());
PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
if (_archived_packages_entries == nullptr) {
_archived_packages_entries = new (mtClass)ArchivedPackageEntries();
}
assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
_archived_packages_entries->put(this, archived_entry);
return archived_entry;
}
PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
if (ptr != nullptr) {
return *ptr;
} else {
return nullptr;
}
}
void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
}
void PackageEntry::init_as_archived_entry() {
Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
_name = ArchiveBuilder::get_buffered_symbol(_name);
_module = ModuleEntry::get_archived_entry(_module);
_qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
_defined_by_cds_in_class_path = 0;
JFR_ONLY(set_trace_id(0);) // re-init at runtime
ArchivePtrMarker::mark_pointer((address*)&_name);
ArchivePtrMarker::mark_pointer((address*)&_module);
ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
LogStreamHandle(Info, aot, package) st;
if (st.is_enabled()) {
st.print("archived ");
print(&st);
}
}
void PackageEntry::load_from_archive() {
_qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
JFR_ONLY(INIT_ID(this);)
}
@ -247,7 +280,14 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
return a->name()->fast_compare(b->name());
}
Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
p->iterate_symbols(closure);
};
_table.iterate_all(syms);
}
Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
// First count the packages in named modules
int n = 0;
auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
@ -257,19 +297,12 @@ Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader
};
_table.iterate_all(count);
Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
// reset n
n = 0;
auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
if (p->should_be_archived()) {
p->pack_qualified_exports();
archived_packages->at_put(n++, p);
LogStreamHandle(Info, aot, package) st;
if (st.is_enabled()) {
st.print("archived ");
p->print(&st);
}
}
};
_table.iterate_all(grab);
@ -278,10 +311,20 @@ Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
}
for (int i = 0; i < n; i++) {
archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
}
return archived_packages;
}
void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
for (int i = 0; i < archived_packages->length(); i++) {
PackageEntry* archived_entry = archived_packages->at(i);
archived_entry->init_as_archived_entry();
}
}
void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {
assert(CDSConfig::is_using_archive(), "runtime only");
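The two loops above implement a copy-then-relocate scheme: allocate_archived_entries() copies every PackageEntry into the archive's RW region while recording an original-to-copy mapping, and init_archived_entries() then patches each copy's cross-references through that mapping. A minimal sketch of the pattern, with std::unordered_map standing in for HotSpot's HashTable and plain `new` in place of ArchiveBuilder::rw_region_alloc (Entry, _peer, and relocate are illustrative names, not JDK APIs):

```cpp
#include <cassert>
#include <unordered_map>

// Illustrative stand-in for a metadata entry with one cross-reference.
struct Entry {
  Entry* _peer;     // plays the role of _module / _qualified_exports
  int    _payload;
};

// Phase 1: copy every entry into the "archive" (plain new here, in place of
// ArchiveBuilder::rw_region_alloc + memcpy) and remember original -> copy.
static std::unordered_map<const Entry*, Entry*> archived_map;

Entry* allocate_archived(const Entry* orig) {
  Entry* copy = new Entry(*orig);
  assert(archived_map.count(orig) == 0 && "entry archived twice");
  archived_map[orig] = copy;
  return copy;
}

// Phase 2: with every copy allocated, patch cross-references so copies
// point at other copies (what init_as_archived_entry() does above).
void relocate(Entry* copy) {
  if (copy->_peer != nullptr) {
    copy->_peer = archived_map.at(copy->_peer);
  }
}
```

The phase split matters: a cross-reference can only be patched once its target's copy exists, which is why the diff allocates all archived entries before calling init_as_archived_entry() on any of them.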

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,7 @@
#ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
#define SHARE_CLASSFILE_PACKAGEENTRY_HPP
#include "cds/aotGrowableArray.hpp"
#include "classfile/moduleEntry.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
#include "runtime/atomicAccess.hpp"
@ -116,7 +114,7 @@ private:
bool _must_walk_exports;
// Contains list of modules this package is qualifiedly exported to. Access
// to this list is protected by the Module_lock.
AOTGrowableArray<ModuleEntry*>* _qualified_exports;
GrowableArray<ModuleEntry*>* _qualified_exports;
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
// Initial size of a package entry's list of qualified exports.
@ -207,24 +205,14 @@ public:
void purge_qualified_exports();
void delete_qualified_exports();
void pack_qualified_exports(); // used by AOT
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
static bool is_read_only_by_default() { return false; }
void print(outputStream* st = tty);
char* name_as_C_string() const {
assert(_name != nullptr, "name can't be null");
return name()->as_C_string();
}
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void remove_unshareable_info();
void iterate_symbols(MetaspaceClosure* closure);
PackageEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
void load_from_archive();
#endif
@ -283,7 +271,9 @@ public:
void print(outputStream* st = tty);
#if INCLUDE_CDS_JAVA_HEAP
Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void iterate_symbols(MetaspaceClosure* closure);
Array<PackageEntry*>* allocate_archived_entries();
void init_archived_entries(Array<PackageEntry*>* archived_packages);
void load_archived_entries(Array<PackageEntry*>* archived_packages);
#endif
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -614,10 +614,6 @@ struct StringTableDeleteCheck : StackObj {
};
void StringTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -625,6 +621,7 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -763,10 +763,6 @@ struct SymbolTableDeleteCheck : StackObj {
};
void SymbolTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -774,6 +770,7 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {
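The comment removed above records a lock-ordering rule: SuspendMark (which may take NativeHeapTrimmer::_lock) must be entered before BulkDeleteTask::prepare() (which may take ConcurrentHashTableResize_lock), and the same fix appears in both the string table and symbol table hunks. A toy illustration of why a fixed global order avoids deadlock, sketched with std::mutex rather than HotSpot's lock types:

```cpp
#include <mutex>

std::mutex trimmer_lock;  // stands in for NativeHeapTrimmer::_lock
std::mutex resize_lock;   // stands in for ConcurrentHashTableResize_lock

// If one path locks resize_lock -> trimmer_lock while another locks
// trimmer_lock -> resize_lock, two threads can deadlock. Fix: every
// path that may hold both takes them in the same order.
void clean_dead_entries() {
  std::lock_guard<std::mutex> suspend_trimmer(trimmer_lock);  // SuspendMark first
  std::lock_guard<std::mutex> prepare_delete(resize_lock);    // then prepare()
  // ... bulk-delete work ...
}
```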

View File

@ -245,6 +245,10 @@ class SerializeClosure;
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@ -298,7 +302,6 @@ class SerializeClosure;
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1124,7 +1124,7 @@ class AbstractClassHierarchyWalker {
Klass* find_witness(InstanceKlass* context_type, KlassDepChange* changes = nullptr);
static void init();
NOT_PRODUCT(static void print_statistics();)
static void print_statistics();
};
PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_anywhere_calls_count = nullptr;
@ -2277,7 +2277,6 @@ bool KlassDepChange::involves_context(Klass* k) {
return is_contained;
}
#ifndef PRODUCT
void Dependencies::print_statistics() {
AbstractClassHierarchyWalker::print_statistics();
}
@ -2303,7 +2302,6 @@ void AbstractClassHierarchyWalker::print_statistics() {
}
}
}
#endif
CallSiteDepChange::CallSiteDepChange(Handle call_site, Handle method_handle) :
_call_site(call_site),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -649,7 +649,7 @@ class Dependencies: public ResourceObj {
};
friend class Dependencies::DepStream;
NOT_PRODUCT(static void print_statistics();)
static void print_statistics();
};

View File

@ -607,27 +607,10 @@ void decode_env::print_address(address adr) {
return;
}
address card_table_base = nullptr;
BarrierSet* bs = BarrierSet::barrier_set();
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
G1BarrierSet* g1bs = barrier_set_cast<G1BarrierSet>(bs);
card_table_base = g1bs->card_table()->byte_map_base();
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
ShenandoahBarrierSet* sbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
if (sbs->card_table() != nullptr) {
card_table_base = sbs->card_table()->byte_map_base();
}
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
card_table_base = ci_card_table_address_as<address>();
}
if (card_table_base != nullptr && adr == card_table_base) {
st->print("card_table_base");
if (bs->is_a(BarrierSet::CardTableBarrierSet) &&
adr == ci_card_table_address_as<address>()) {
st->print("word_map_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
return;
}
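The master side of this hunk asks each barrier set for its card table's byte_map_base() so the disassembler can label that address. The base is biased so that adding a heap address shifted by the card size lands on the covering card byte; a hedged sketch of that address arithmetic, assuming the common 512-bytes-per-card geometry (constants and encodings are illustrative):

```cpp
#include <cstdint>

// One card byte covers 2^9 = 512 bytes of heap. byte_map_base is biased:
// byte_map_base = byte_map - (heap_start >> kCardShift), so any in-heap
// address, shifted and added, lands on its covering card byte.
constexpr int kCardShift = 9;

uint8_t* byte_for(uint8_t* byte_map_base, uintptr_t addr) {
  return byte_map_base + (addr >> kCardShift);
}

// A post-write barrier then dirties the covering card (values illustrative;
// the real clean/dirty encodings are GC-specific).
constexpr uint8_t kCleanCard = 0xff;
constexpr uint8_t kDirtyCard = 0x00;

void post_write_barrier(uint8_t* byte_map_base, uintptr_t field_addr) {
  uint8_t* card = byte_for(byte_map_base, field_addr);
  if (*card == kCleanCard) {
    *card = kDirtyCard;
  }
}
```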

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -209,17 +209,6 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}
// Do not interfere with GC-Pressure driven heap resizing unless the user
// explicitly sets otherwise. G1 heap sizing should be free to grow or shrink
// the heap based on GC pressure, rather than being forced to satisfy
// MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
}
if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
}
// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
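The removed block overrides MinHeapFreeRatio/MaxHeapFreeRatio only when the user left them at their defaults, the usual HotSpot ergonomics pattern: derived defaults must never clobber explicit command-line choices. A minimal sketch of the idea (the Flag struct is illustrative; FLAG_IS_DEFAULT/FLAG_SET_DEFAULT are the real macros it stands in for):

```cpp
// Illustrative flag record; HotSpot's real flags carry an origin enum.
struct Flag {
  int  value;
  bool set_by_user;  // true when parsed from the command line
};

// Re-derive a default without clobbering an explicit user setting:
// the shape of the FLAG_IS_DEFAULT / FLAG_SET_DEFAULT pairing above.
void set_ergonomic_default(Flag& f, int derived_value) {
  if (!f.set_by_user) {
    f.value = derived_value;
  }
}
```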

View File

@ -70,11 +70,7 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field) {
// Make sure that the card table reference is read only once. Otherwise the compiler
// might reload that value in the two accesses below, that could cause writes to
// the wrong card table.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardValue* byte = card_table->byte_for(field);
volatile CardValue* byte = _card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
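The master side's comment explains the fix: load the card table pointer exactly once into a local, so the compiler cannot re-read a field that the GC may concurrently swap and end up addressing two different tables. The same idiom sketched with std::atomic (types and encodings here are illustrative):

```cpp
#include <atomic>
#include <cstdint>

struct CardTable { uint8_t* byte_map_base; };

std::atomic<CardTable*> g_card_table;  // swapped by the GC at phase changes

void write_ref_field_post(uintptr_t field_addr) {
  // One load into a local: both the address computation and the card
  // update below are guaranteed to use the same table.
  CardTable* ct = g_card_table.load(std::memory_order_relaxed);
  uint8_t* card = ct->byte_map_base + (field_addr >> 9);
  if (*card == 0xff) {  // illustrative clean/dirty encoding
    *card = 0x00;
  }
}
```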

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@ -39,7 +40,7 @@ const char* G1AbstractSubTask::name() const {
}
bool G1BatchedTask::try_claim_serial_task(int& task) {
task = _num_serial_tasks_done.fetch_then_add(1);
task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
return task < _serial_tasks.length();
}
@ -95,8 +96,8 @@ void G1BatchedTask::work(uint worker_id) {
}
G1BatchedTask::~G1BatchedTask() {
assert(_num_serial_tasks_done.load_relaxed() >= _serial_tasks.length(),
"Only %d tasks of %d claimed", _num_serial_tasks_done.load_relaxed(), _serial_tasks.length());
assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
"Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,6 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
template <typename E, MemTag MT>
class GrowableArrayCHeap;
@ -121,7 +120,7 @@ public:
// 5) ~T()
//
class G1BatchedTask : public WorkerTask {
Atomic<int> _num_serial_tasks_done;
volatile int _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;
bool try_claim_serial_task(int& task);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@ -191,32 +192,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i].store_relaxed(0);
_coarsen_collision[i].store_relaxed(0);
_coarsen_from[i] = 0;
_coarsen_collision[i] = 0;
}
}
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
_coarsen_from[i] = other._coarsen_from[i];
_coarsen_collision[i] = other._coarsen_collision[i];
}
}
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
_coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
_coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
}
}
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
_coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
if (collision) {
_coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
}
}
@ -227,13 +228,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ",
_coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
_coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
_coarsen_from[0], _coarsen_collision[0],
_coarsen_from[1], _coarsen_collision[1],
// There is no BitMap at the first level so we can't coarsen to it.
_coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
_coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
_coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
_coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
_coarsen_from[3], _coarsen_collision[3],
_coarsen_from[4], _coarsen_collision[4],
_coarsen_from[5], _coarsen_collision[5],
_coarsen_from[6], _coarsen_collision[6]
);
}
@ -247,7 +248,7 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
// the per region cardsets.
const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table?
Atomic<bool> _inserted_card;
bool volatile _inserted_card;
G1CardSetMemoryManager* _mm;
CardSetHash _table;
@ -310,10 +311,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
if (!_inserted_card.load_relaxed() && inserted) {
if (!_inserted_card && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
_inserted_card.store_relaxed(true);
AtomicAccess::store(&_inserted_card, true);
}
return found.value();
@ -342,9 +343,9 @@ public:
}
void reset() {
if (_inserted_card.load_relaxed()) {
if (AtomicAccess::load(&_inserted_card)) {
_table.unsafe_reset(InitialLogTableSize);
_inserted_card.store_relaxed(false);
AtomicAccess::store(&_inserted_card, false);
}
}
@ -454,14 +455,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value);
}
G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
// Update reference counts under RCU critical section to avoid a
// use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
ContainerPtr container = container_addr->load_acquire();
ContainerPtr container = AtomicAccess::load_acquire(container_addr);
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@ -502,15 +503,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr;
void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
void coarsen_to_full(ContainerPtr* container_addr) {
while (true) {
ContainerPtr cur_container = container_addr->load_acquire();
ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@ -522,7 +523,7 @@ class G1ReleaseCardsets : public StackObj {
public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
void operator ()(Atomic<ContainerPtr>* container_addr) {
void operator ()(ContainerPtr* container_addr) {
coarsen_to_full(container_addr);
}
};
@ -543,10 +544,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region);
Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
while (true) {
if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@ -570,7 +571,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@ -587,7 +588,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
}
G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
}
@ -609,7 +610,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container;
}
bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ContainerPtr cur_container,
uint card_in_region,
bool within_howl) {
@ -639,7 +640,7 @@ bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ShouldNotReachHere();
}
ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@ -686,7 +687,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
_num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
}
}
@ -712,18 +713,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
_num_occupied.add_then_fetch(diff, memory_order_relaxed);
AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
}
}
G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
ContainerPtr container,
uint card_region,
uint card_in_region,
@ -826,8 +827,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
_num_occupied.add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();
@ -852,7 +853,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false;
}
ContainerPtr container = table_entry->_container.load_relaxed();
ContainerPtr container = table_entry->_container;
if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that
// case in the switch below.
@ -888,7 +889,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return;
}
ContainerPtr container = table_entry->_container.load_relaxed();
ContainerPtr container = table_entry->_container;
if (container == FullCardSet) {
st->print("FULL card set)");
return;
@ -939,7 +940,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value =
[&] (G1CardSetHashTableValue* value) {
cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
return true;
};
@ -1000,11 +1001,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
}
bool G1CardSet::is_empty() const {
return _num_occupied.load_relaxed() == 0;
return _num_occupied == 0;
}
size_t G1CardSet::occupied() const {
return _num_occupied.load_relaxed();
return _num_occupied;
}
size_t G1CardSet::num_containers() {
@ -1023,6 +1024,10 @@ size_t G1CardSet::num_containers() {
return cl._count;
}
G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
return _coarsen_stats;
}
void G1CardSet::print_coarsen_stats(outputStream* out) {
_last_coarsen_stats.subtract_from(_coarsen_stats);
@ -1050,7 +1055,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() {
_table->reset();
_num_occupied.store_relaxed(0);
_num_occupied = 0;
_mm->flush();
}
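Most of the churn in this file and the ones below swaps between two spellings of the same atomics: master wraps fields in an Atomic&lt;T&gt; class with member functions, while jdk-27+5 uses volatile fields with free-standing AtomicAccess calls. A minimal sketch of such a wrapper over std::atomic, using the method names the diff shows (treated as assumptions about the HotSpot API, not a verbatim copy):

```cpp
#include <atomic>

template <typename T>
class Atomic {
  std::atomic<T> _v;
public:
  explicit Atomic(T v = T()) : _v(v) {}
  T    load_relaxed() const  { return _v.load(std::memory_order_relaxed); }
  void store_relaxed(T v)    { _v.store(v, std::memory_order_relaxed); }
  T    load_acquire() const  { return _v.load(std::memory_order_acquire); }
  void release_store(T v)    { _v.store(v, std::memory_order_release); }
  T fetch_then_add(T d)      { return _v.fetch_add(d); }      // returns old value
  T add_then_fetch(T d)      { return _v.fetch_add(d) + d; }  // returns new value
  // cmpxchg-style: returns the observed value; success iff it == expected.
  T compare_exchange(T expected, T desired) {
    _v.compare_exchange_strong(expected, desired);  // updates 'expected' on failure
    return expected;
  }
};
```

Both spellings express the same operations; only the call syntax and the memory orders spelled at each call site differ.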

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions;
@ -155,8 +154,8 @@ public:
private:
// Indices are "from" indices.
Atomic<size_t> _coarsen_from[NumCoarsenCategories];
Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
size_t _coarsen_from[NumCoarsenCategories];
size_t _coarsen_collision[NumCoarsenCategories];
public:
G1CardSetCoarsenStats() { reset(); }
@ -272,11 +271,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality.
Atomic<size_t> _num_occupied;
size_t _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type);
ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
// Returns true if the card set container should be released
bool release_container(ContainerPtr container);
// Release card set and free if needed.
@ -289,7 +288,7 @@ private:
// coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card).
bool coarsen_container(Atomic<ContainerPtr>* container_addr,
bool coarsen_container(ContainerPtr volatile* container_addr,
ContainerPtr cur_container,
uint card_in_region, bool within_howl = false);
@ -301,9 +300,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@ -367,6 +366,7 @@ public:
size_t num_containers();
static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes.
@ -412,15 +412,8 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx;
Atomic<uint> _num_occupied;
Atomic<ContainerPtr> _container;
// Copy constructor needed for use in ConcurrentHashTable.
G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
_region_idx(other._region_idx),
_num_occupied(other._num_occupied.load_relaxed()),
_container(other._container.load_relaxed())
{ }
uint volatile _num_occupied;
ContainerPtr volatile _container;
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr;
Atomic<ContainerPtr>* _value_addr;
ContainerPtr volatile * _value_addr;
ContainerPtr _value;
static const uint SizeFieldLen = 3;
@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}
G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}
@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
Atomic<uintptr_t> _ref_count;
uintptr_t _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }
uintptr_t refcount() const { return _ref_count.load_acquire(); }
uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
bool try_increment_refcount();
@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
Atomic<EntryCountType> _num_entries;
EntryCountType volatile _num_entries;
// VLA implementation.
EntryDataType _data[1];
@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj {
Atomic<EntryCountType>* _num_entries_addr;
EntryCountType volatile* _num_entries_addr;
EntryCountType _local_num_entries;
public:
G1CardSetArrayLocker(Atomic<EntryCountType>* value);
G1CardSetArrayLocker(EntryCountType volatile* value);
EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
_num_entries_addr->release_store(_local_num_entries);
AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
}
};
@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);
size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
size_t num_entries() const { return _num_entries & EntryMask; }
static size_t header_size_in_bytes();
@ -223,7 +223,7 @@ public:
};
class G1CardSetBitMap : public G1CardSetContainer {
Atomic<size_t> _num_bits_set;
size_t _num_bits_set;
BitMap::bm_word_t _bits[1];
public:
@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}
uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
uint num_bits_set() const { return (uint)_num_bits_set; }
template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
Atomic<EntryCountType> _num_entries;
EntryCountType volatile _num_entries;
private:
// VLA implementation.
Atomic<ContainerPtr> _buckets[1];
ContainerPtr _buckets[1];
// Do not add class member variables beyond this point.
// Iterates over the given ContainerPtr with at index in this Howl card set,
@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const;
Atomic<ContainerPtr> const* buckets() const;
ContainerPtr const* buckets() const;
public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
ContainerPtr const* container_addr(EntryCountType index) const;
Atomic<ContainerPtr>* container_addr(EntryCountType index);
ContainerPtr* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor
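G1CardSetArrayLocker above folds a spin lock into the same word as the entry count: the lock bit is CAS-ed into _num_entries, mutations happen on a local copy, and the destructor release-stores the new count with the bit cleared, publishing both at once. A hedged sketch of that count-plus-lock-bit word (bit position and types are illustrative):

```cpp
#include <atomic>
#include <cstdint>

constexpr uint32_t kLockBit   = 1u << 31;     // illustrative bit position
constexpr uint32_t kCountMask = kLockBit - 1;

class CountLocker {
  std::atomic<uint32_t>& _word;
  uint32_t _local_count;
public:
  explicit CountLocker(std::atomic<uint32_t>& word) : _word(word) {
    uint32_t count = _word.load(std::memory_order_relaxed) & kCountMask;
    // Spin: the CAS only succeeds against an unlocked value, installing the bit.
    while (!_word.compare_exchange_weak(count, count | kLockBit)) {
      count &= kCountMask;  // strip a lock bit seen in the refreshed value
    }
    _local_count = count;
  }
  void inc_count() { ++_local_count; }
  ~CountLocker() {
    // Publish the new count and release the lock in a single store.
    _word.store(_local_count, std::memory_order_release);
  }
};
```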

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
if (ref_count == old_value) {
return true;
}
@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
return _ref_count.sub_then_fetch(2u);
return AtomicAccess::sub(&_ref_count, 2u);
}
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@ -149,13 +149,14 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
while (true) {
EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
(EntryCountType)(num_entries | LockBitMask));
EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
num_entries,
(EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
@ -173,7 +174,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
assert(index < _num_entries.load_relaxed(), "precondition");
assert(index < _num_entries, "precondition");
return base_addr() + index;
}
@ -188,7 +189,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -222,7 +223,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -234,7 +235,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@ -255,11 +256,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
if (_num_bits_set.load_relaxed() >= threshold) {
if (_num_bits_set >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
_num_bits_set.add_then_fetch(1u, memory_order_relaxed);
AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
return Added;
}
return Found;
@ -275,22 +276,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries.load_relaxed(), "precondition");
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition");
return buckets() + index;
}
inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
return (*container_addr(index)).load_relaxed();
return *container_addr(index);
}
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
return reinterpret_cast<ContainerPtr const*>(ptr);
}
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@ -299,7 +300,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
container_addr(i)->store_relaxed(G1CardSetInlinePtr());
*container_addr(i) = G1CardSetInlinePtr();
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@ -309,8 +310,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
Atomic<ContainerPtr>* array_entry = container_addr(bucket);
ContainerPtr container = array_entry->load_acquire();
ContainerPtr* array_entry = container_addr(bucket);
ContainerPtr container = AtomicAccess::load_acquire(array_entry);
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {
void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
_card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}
void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i].store_relaxed(0);
_card_claims[i] = 0;
}
}
void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
_card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
class G1HeapRegionClosure;
@ -46,7 +45,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {
// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
Atomic<uint>* _card_claims;
uint volatile* _card_claims;
uint _cards_per_chunk; // For conversion between card index and chunk index.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,25 +29,26 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"
bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
}
void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
_card_claims[region].store_relaxed(0u);
AtomicAccess::store(&_card_claims[region], 0u);
}
uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_all_cards(uint region) {
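claim_cards() and claim_chunk() above parcel out disjoint card ranges with a single relaxed fetch-then-add per claim: a worker owns [old, old + increment) and gives up once the claim value passes CardsPerRegion. A hedged sketch of the scheme (the constant and chunk size are illustrative):

```cpp
#include <atomic>
#include <cstdio>

constexpr unsigned kCardsPerRegion = 512;  // illustrative region geometry
std::atomic<unsigned> claim{0};            // one claim word per region in the table

// Workers race on fetch_add; each winner owns a disjoint [start, end) range.
void worker(unsigned chunk_size) {
  while (true) {
    unsigned start = claim.fetch_add(chunk_size, std::memory_order_relaxed);
    if (start >= kCardsPerRegion) {
      return;  // region fully claimed (the claim word may overshoot; that's fine)
    }
    unsigned end = start + chunk_size;
    if (end > kCardsPerRegion) end = kCardsPerRegion;
    std::printf("processing cards [%u, %u)\n", start, end);
  }
}
```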

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;
Atomic<size_t> _num_entries;
size_t volatile _num_entries;
bool is_empty() const { return number_of_entries() == 0; }
@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
_num_entries.add_then_fetch(1u);
AtomicAccess::inc(&_num_entries);
}
if (grow_hint) {
_table.grow(Thread::current());
@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
_num_entries.sub_then_fetch(1u);
AtomicAccess::dec(&_num_entries);
}
return removed;
}
@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
size_t current_size = _num_entries.sub_then_fetch(num_deleted);
size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
shrink_to_match(current_size);
}
}
@ -226,7 +226,7 @@ public:
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
size_t number_of_entries() const { return _num_entries.load_relaxed(); }
size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {

Some files were not shown because too many files have changed in this diff Show More