Merge branch 'master' into JDK-8372526

# Conflicts:
#	src/java.base/share/classes/sun/security/ssl/SSLConfiguration.java
Artur Barashev 2026-01-16 15:33:23 -05:00
commit f2ccafd026
1173 changed files with 45401 additions and 27425 deletions


@ -96,6 +96,8 @@ jobs:
--with-boot-jdk=${{ steps.bootjdk.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
--with-native-debug-symbols-level=1
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&


@ -179,6 +179,8 @@ jobs:
--openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}
--with-sysroot=sysroot
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
--with-native-debug-symbols-level=1
CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-${{ inputs.gcc-major-version }}
CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-${{ inputs.gcc-major-version }}
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (


@ -143,6 +143,8 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
--with-native-debug-symbols-level=1
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&


@ -110,6 +110,8 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-zlib=system
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
--with-native-debug-symbols-level=1
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&


@ -134,6 +134,7 @@ jobs:
--with-gtest=${{ steps.gtest.outputs.path }}
--with-msvc-toolset-version=${{ inputs.msvc-toolset-version }}
--with-jmod-compress=zip-1
--with-external-symbols-in-bundles=none
${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
echo "Dumping config.log:" &&
cat config.log &&


@ -69,6 +69,23 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)
if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
@ -93,8 +110,9 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
)
fi
CFLAGS_DEBUG_SYMBOLS="-g -gdwarf-4"
ASFLAGS_DEBUG_SYMBOLS="-g"
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@ -113,8 +131,9 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${GDWARF_FLAGS}],
IF_FALSE: [GDWARF_FLAGS=""])
CFLAGS_DEBUG_SYMBOLS="-g ${GDWARF_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="-g"
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi


@ -114,7 +114,7 @@ EscapeDollar = $(subst $$,\$$,$(subst \$$,$$,$(strip $1)))
################################################################################
# This macro works just like EscapeDollar above, but for #.
EscapeHash = $(subst \#,\\\#,$(subst \\\#,\#,$(strip $1)))
EscapeHash = $(subst $(HASH),\$(HASH),$(subst \$(HASH),$(HASH),$(strip $1)))
################################################################################
# This macro translates $ into $$ to protect the string from make itself.


@ -286,7 +286,7 @@ public class ClassGenerator {
diagnosticFlags.isEmpty() ?
StubKind.DIAGNOSTIC_FLAGS_EMPTY.format() :
StubKind.DIAGNOSTIC_FLAGS_NON_EMPTY.format(diagnosticFlags),
StubKind.LINT_CATEGORY.format("\"" + lintCategory + "\""),
StubKind.LINT_CATEGORY.format(toLintFieldName(lintCategory)),
"\"" + keyParts[0] + "\"",
"\"" + Stream.of(keyParts).skip(2).collect(Collectors.joining(".")) + "\"",
javadoc);
@ -314,7 +314,7 @@ public class ClassGenerator {
diagnosticFlags.isEmpty() ?
StubKind.DIAGNOSTIC_FLAGS_EMPTY.format() :
StubKind.DIAGNOSTIC_FLAGS_NON_EMPTY.format(diagnosticFlags),
StubKind.LINT_CATEGORY.format("\"" + lintCategory + "\""),
StubKind.LINT_CATEGORY.format(toLintFieldName(lintCategory)),
"\"" + keyParts[0] + "\"",
"\"" + Stream.of(keyParts).skip(2).collect(Collectors.joining(".")) + "\"",
argNames.stream().collect(Collectors.joining(", ")));
@ -329,6 +329,11 @@ public class ClassGenerator {
}
}
String toLintFieldName(String lintCategory) {
return lintCategory.toUpperCase()
.replaceAll("-", "_");
}
/**
* Form the name of a factory method/field given a resource key.
*/


@ -87,7 +87,7 @@ suppress.warnings=\
@SuppressWarnings("rawtypes")\n
lint.category=\
LintCategory.get({0}).get()
LintCategory.{0}
diagnostic.flags.empty=\
EnumSet.noneOf(DiagnosticFlag.class)


@ -54,21 +54,6 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJDWP, \
NAME := jdwp, \
OPTIMIZATION := LOW, \
CFLAGS := -DJDWP_LOGGING $(ICONV_CFLAGS), \
DISABLED_WARNINGS_gcc_eventFilter.c := unused-variable, \
DISABLED_WARNINGS_gcc_SDE.c := unused-function, \
DISABLED_WARNINGS_gcc_threadControl.c := unused-but-set-variable \
unused-variable, \
DISABLED_WARNINGS_gcc_utf_util.c := unused-but-set-variable, \
DISABLED_WARNINGS_clang_error_messages.c := format-nonliteral, \
DISABLED_WARNINGS_clang_eventFilter.c := unused-variable, \
DISABLED_WARNINGS_clang_EventRequestImpl.c := self-assign, \
DISABLED_WARNINGS_clang_inStream.c := sometimes-uninitialized, \
DISABLED_WARNINGS_clang_log_messages.c := format-nonliteral, \
DISABLED_WARNINGS_clang_SDE.c := unused-function, \
DISABLED_WARNINGS_clang_threadControl.c := unused-but-set-variable \
unused-variable, \
DISABLED_WARNINGS_clang_utf_util.c := unused-but-set-variable, \
DISABLED_WARNINGS_microsoft_debugInit.c := 5287, \
LDFLAGS := $(ICONV_LDFLAGS), \
EXTRA_HEADER_DIRS := \
include \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,13 +23,13 @@
*
*/
#include <stdlib.h> // do not reorder
#include <stdint.h> // do not reorder
#include "cppstdlib/cstdlib.hpp"
#include "immediate_aarch64.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "utilities/globalDefinitions.hpp"
#include <stdint.h>
// there are at most 2^13 possible logical immediate encodings
// however, some combinations of immr and imms are invalid
static const unsigned LI_TABLE_SIZE = (1 << 13);


@ -1,4 +1,5 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -22,10 +23,10 @@
*
*/
#ifndef _IMMEDIATE_H
#define _IMMEDIATE_H
#ifndef CPU_AARCH64_IMMEDIATE_AARCH64_HPP
#define CPU_AARCH64_IMMEDIATE_AARCH64_HPP
#include <sys/types.h>
#include <stdint.h>
/*
* functions to map backwards and forwards between logical or floating
@ -51,4 +52,4 @@ uint32_t encoding_for_logical_immediate(uint64_t immediate);
uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp);
uint32_t encoding_for_fp_immediate(float immediate);
#endif // _IMMEDIATE_H
#endif // CPU_AARCH64_IMMEDIATE_AARCH64_HPP


@ -6259,14 +6259,10 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
// Intrinsic for
//
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-


@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
ble(CR0, done);
ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
ble(CR0, done);
blt(CR0, done); // if (SP < _cont_fastpath) goto done;
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
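
For orientation, a minimal C++ sketch of what these two stubs now do, following the branch-condition comments added above (the struct and accessor below are illustrative stand-ins for R16_thread and JavaThread::_cont_fastpath, not the real HotSpot types). The functional change is in pop_cont_fastpath: with blt instead of ble, the watermark is also cleared when SP is equal to _cont_fastpath, not only when SP is above it.

#include <cstdint>

struct ThreadModel { intptr_t* cont_fastpath = nullptr; };   // stand-in for JavaThread::_cont_fastpath

void push_cont_fastpath(ThreadModel* thread, intptr_t* sp) {
  if (sp > thread->cont_fastpath) {    // ble taken when SP <= _cont_fastpath: skip the store
    thread->cont_fastpath = sp;
  }
}

void pop_cont_fastpath(ThreadModel* thread, intptr_t* sp) {
  if (sp >= thread->cont_fastpath) {   // blt taken when SP < _cont_fastpath: skip the clear
    thread->cont_fastpath = nullptr;
  }
}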


@ -2813,14 +2813,10 @@ void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Regist
// Intrinsic for
//
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied. A successful
// copy will complete with the post-condition: 'res' == 'len', while an


@ -114,12 +114,12 @@ source %{
case Op_VectorCastHF2F:
case Op_VectorCastF2HF:
case Op_AddVHF:
case Op_SubVHF:
case Op_MulVHF:
case Op_DivVHF:
case Op_MaxVHF:
case Op_MinVHF:
case Op_MulVHF:
case Op_SqrtVHF:
case Op_SubVHF:
return UseZvfh;
case Op_FmaVHF:
return UseZvfh && UseFMA;
@ -147,13 +147,28 @@ source %{
if (!UseRVV) {
return false;
}
switch (opcode) {
case Op_SelectFromTwoVector:
// There is no masked version of the two-vector selectFrom, i.e. selectFrom(av, bv, mask), in the Vector API.
return false;
// Currently, the masked versions of the following 8 Float16 operations are disabled.
// When support for Float16 vector classes is added to the Vector API and the masked
// Float16 IR can be generated, these masked operations will be enabled and the relevant
// backend support added.
case Op_AddVHF:
case Op_SubVHF:
case Op_MulVHF:
case Op_DivVHF:
case Op_MaxVHF:
case Op_MinVHF:
case Op_SqrtVHF:
case Op_FmaVHF:
return false;
default:
break;
}
return match_rule_supported_vector(opcode, vlen, bt);
}


@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
(unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector,
unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
}
#ifdef __riscv_ztso
// Hotspot is compiled with TSO support, it will only run on hardware which
// supports Ztso
@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
}
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector,
unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
}
// NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
// as there are extra checks inside it which could disable UseRVV
// in some situations.


@ -1261,29 +1261,9 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
__ jccb(Assembler::notEqual, next_test);
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
__ jmp(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
__ cmpptr(recv_addr, NULL_WORD);
__ jccb(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
__ jmp(*update_done);
__ bind(next_test);
}
Register recv) {
int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
__ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@ -1341,15 +1321,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ jmp(*obj_is_null);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj, tmp_load_klass);
type_profile_helper(mdo, md, data, recv, &update_done);
Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ jcc(Assembler::equal, *obj_is_null);
}
@ -1461,14 +1435,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ jmp(done);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value, tmp_load_klass);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ jcc(Assembler::equal, done);
}
@ -2791,13 +2760,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
// dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@ -2805,32 +2770,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == nullptr) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
// Receiver type is not found in profile data.
// Fall back to runtime helper to handle the rest at runtime.
__ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv, tmp_load_klass);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);


@ -43,7 +43,7 @@
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
Register recv);
enum {
_call_stub_size = 28,


@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
* Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
// When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
if (sizeKnown) {
__ movl(temp2, 31 + size);
__ movl(temp2, (isU ? 30 : 31) + size);
} else {
__ movl(temp2, 31);
__ movl(temp2, isU ? 30 : 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);
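
A worked step for the isU case, in the comment's notation (needle index k, haystack length n), assuming size/needleLen plays the role of k and hsLength the role of n: moving the compare window from haystack[(n-32):(n-1)] to haystack[(k-2):(k-2+31)] gives x = (k - 2 + 31) - (n - 1) = k + 30 - n, which is why the constant above drops from 31 to 30 when isU is set.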


@ -580,17 +580,16 @@ void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
// Rsub_klass: subklass
//
// Kills:
// rcx, rdi
// rcx
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != rax, "rax holds superklass");
assert(Rsub_klass != r14, "r14 holds locals");
assert(Rsub_klass != r13, "r13 holds bcp");
assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
profile_typecheck(rcx, Rsub_klass); // blows rcx
// Do the check.
check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
@ -1394,7 +1393,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1414,7 +1412,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2, true);
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@ -1423,135 +1421,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} else {
record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
Label& done, int total_rows,
OffsetFunction item_offset_fn,
OffsetFunction item_count_offset_fn) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
// Test this row for both the item and for null.
// Take any of three different outcomes:
// 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
for (int row = start_row; row <= last_row; row++) {
Label next_test;
bool test_for_null_also = (row == start_row);
// See if the item is item[n].
int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test);
// (Reg2 now contains the item from the CallData.)
// The item is item[n]. Increment count[n].
int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
jmp(done);
bind(next_test);
if (test_for_null_also) {
// Failed the equality check on item[n]... Test for null.
testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
Label found_null;
jccb(Assembler::zero, found_null);
// Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
break;
}
Label found_null;
// Since null is rare, make it be the branch-taken case.
jcc(Assembler::zero, found_null);
// Put all the "Case 3" tests here.
record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn);
// Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot.
bind(found_null);
}
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(item_count_offset_fn(start_row));
movl(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) {
jmp(done);
}
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp, Register reg2,
bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
bind (done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@ -1611,7 +1480,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1624,7 +1493,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2, false);
profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);


@ -234,16 +234,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
Label& done, int total_rows,
OffsetFunction item_offset_fn,
OffsetFunction item_count_offset_fn);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@ -254,11 +244,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck(Register mdp, Register klass);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -4749,6 +4749,203 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
return Address(rsp, scale_reg, scale_factor, offset);
}
// Handle the receiver type profile update given the "recv" klass.
//
// Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in RD, updates
// the polymorphic counter.
//
// This code is expected to run from either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
//
void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
int poly_count_offset = in_bytes(CounterData::count_offset());
int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
// Adjust for MDP offsets. Slots are pointer-sized, so is the global offset.
assert(is_aligned(mdp_offset, BytesPerWord), "sanity");
base_receiver_offset += mdp_offset;
end_receiver_offset += mdp_offset;
poly_count_offset += mdp_offset;
// Scale down to optimize encoding. Slots are pointer-sized.
assert(is_aligned(base_receiver_offset, BytesPerWord), "sanity");
assert(is_aligned(end_receiver_offset, BytesPerWord), "sanity");
assert(is_aligned(poly_count_offset, BytesPerWord), "sanity");
assert(is_aligned(receiver_step, BytesPerWord), "sanity");
assert(is_aligned(receiver_to_count_step, BytesPerWord), "sanity");
base_receiver_offset >>= LogBytesPerWord;
end_receiver_offset >>= LogBytesPerWord;
poly_count_offset >>= LogBytesPerWord;
receiver_step >>= LogBytesPerWord;
receiver_to_count_step >>= LogBytesPerWord;
#ifdef ASSERT
// We are about to walk the MDO slots without asking for offsets.
// Check that our math hits all the right spots.
for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
int offset = base_receiver_offset + receiver_step*c;
int count_offset = offset + receiver_to_count_step;
assert((offset << LogBytesPerWord) == real_recv_offset, "receiver slot math");
assert((count_offset << LogBytesPerWord) == real_count_offset, "receiver count math");
}
int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
assert(poly_count_offset << LogBytesPerWord == real_poly_count_offset, "poly counter math");
#endif
// Corner case: no profile table. Increment poly counter and exit.
if (ReceiverTypeData::row_limit() == 0) {
addptr(Address(mdp, poly_count_offset, Address::times_ptr), DataLayout::counter_increment);
return;
}
Register offset = rscratch1;
Label L_loop_search_receiver, L_loop_search_empty;
Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
// The code here recognizes three major cases:
// A. Fastest: receiver found in the table
// B. Fast: no receiver in the table, and the table is full
// C. Slow: no receiver in the table, free slots in the table
//
// The case A performance is most important, as perfectly-behaved code would end up
// there, especially with larger TypeProfileWidth. The case B performance is
// important as well: this is where the bulk of the code lands for typical megamorphic
// cases. The case C performance is not essential; its job is to deal with installation
// races, so we optimize for code density instead. Case C needs to make sure that receiver
// rows are only claimed once. This guarantees we never overwrite a row for another
// receiver and never duplicate receivers in the list, keeping the profile type-accurate.
//
// It is very tempting to handle these cases in a single loop, and claim the first slot
// without checking the rest of the table. But, profiling code should tolerate free slots
// in the table, as class unloading can clear them. After such cleanup, the receiver
// we need might be _after_ the free slot. Therefore, we need to let at least a full scan
// complete before trying to install new slots. Splitting the code into several tight
// loops also helpfully optimizes for cases A and B.
//
// This code is effectively:
//
// restart:
// // Fastest: receiver is already installed
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == recv) goto found_recv(i);
// }
//
// // Fast: no receiver, but profile is full
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == null) goto found_null(i);
// }
// goto polymorphic
//
// // Slow: try to install receiver
// found_null(i):
// CAS(&receiver(i), null, recv);
// goto restart
//
// polymorphic:
// count++;
// return
//
// found_recv(i):
// *receiver_count(i)++
//
bind(L_restart);
// Fastest: receiver is already installed
movptr(offset, base_receiver_offset);
bind(L_loop_search_receiver);
cmpptr(recv, Address(mdp, offset, Address::times_ptr));
jccb(Assembler::equal, L_found_recv);
addptr(offset, receiver_step);
cmpptr(offset, end_receiver_offset);
jccb(Assembler::notEqual, L_loop_search_receiver);
// Fast: no receiver, but profile is full
movptr(offset, base_receiver_offset);
bind(L_loop_search_empty);
cmpptr(Address(mdp, offset, Address::times_ptr), NULL_WORD);
jccb(Assembler::equal, L_found_empty);
addptr(offset, receiver_step);
cmpptr(offset, end_receiver_offset);
jccb(Assembler::notEqual, L_loop_search_empty);
jmpb(L_polymorphic);
// Slow: try to install receiver
bind(L_found_empty);
// Atomically swing receiver slot: null -> recv.
//
// The update code uses CAS, which requires the RAX register specifically, *and* it needs
// the other important registers untouched, as they form the address. Therefore, we need
// to shift any important register out of RAX into some other spare register. Since no
// register is known to be free here, whichever one we use has to be saved on the stack.
Register spare_reg = noreg;
Register shifted_mdp = mdp;
Register shifted_recv = recv;
if (recv == rax || mdp == rax) {
spare_reg = (recv != rbx && mdp != rbx) ? rbx :
(recv != rcx && mdp != rcx) ? rcx :
rdx;
assert_different_registers(mdp, recv, offset, spare_reg);
push(spare_reg);
if (recv == rax) {
movptr(spare_reg, recv);
shifted_recv = spare_reg;
} else {
assert(mdp == rax, "Remaining case");
movptr(spare_reg, mdp);
shifted_mdp = spare_reg;
}
} else {
push(rax);
}
// None of the important registers are in RAX after this shuffle.
assert_different_registers(rax, shifted_mdp, shifted_recv, offset);
xorptr(rax, rax);
cmpxchgptr(shifted_recv, Address(shifted_mdp, offset, Address::times_ptr));
// Unshift registers.
if (recv == rax || mdp == rax) {
movptr(rax, spare_reg);
pop(spare_reg);
} else {
pop(rax);
}
// CAS success means the slot now has the receiver we want. CAS failure means
// something had claimed the slot concurrently: it can be the same receiver we want,
// or something else. Since this is a slow path, we can optimize for code density,
// and just restart the search from the beginning.
jmpb(L_restart);
// Counter updates:
// Increment polymorphic counter instead of receiver slot.
bind(L_polymorphic);
movptr(offset, poly_count_offset);
jmpb(L_count_update);
// Found a receiver, convert its slot offset to corresponding count offset.
bind(L_found_recv);
addptr(offset, receiver_to_count_step);
bind(L_count_update);
addptr(Address(mdp, offset, Address::times_ptr), DataLayout::counter_increment);
}
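
As a cross-check of the emitted control flow, a compilable C++ sketch of the same update algorithm (the row layout and names below are illustrative, not HotSpot's real MDO layout; as the comment above notes, only the receiver-claiming CAS is atomic, while the counter bumps are plain read-modify-writes):

#include <atomic>
#include <cstdint>

struct Row {
  std::atomic<intptr_t> receiver{0};
  intptr_t count = 0;                  // intentionally non-atomic, matching the comment above
};

void profile_receiver_type_model(intptr_t recv, Row* rows, int row_limit, intptr_t* poly_count) {
  for (;;) {
    // Case A (fastest): receiver already installed -> bump its count.
    for (int i = 0; i < row_limit; i++) {
      if (rows[i].receiver.load(std::memory_order_relaxed) == recv) { rows[i].count++; return; }
    }
    // Case B (fast): no matching receiver and no free slot -> bump the polymorphic counter.
    int free_row = -1;
    for (int i = 0; i < row_limit; i++) {
      if (rows[i].receiver.load(std::memory_order_relaxed) == 0) { free_row = i; break; }
    }
    if (free_row < 0) { (*poly_count)++; return; }
    // Case C (slow): try to claim the free slot atomically, then restart the scan from the
    // beginning regardless of the CAS outcome (another thread may have claimed the slot).
    intptr_t expected = 0;
    rows[free_row].receiver.compare_exchange_strong(expected, recv);
  }
}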
void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
if (!VerifyOops) return;
@ -5889,7 +6086,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
subptr(count, 16 << shift);
jccb(Assembler::less, L_check_fill_32_bytes);
jcc(Assembler::less, L_check_fill_32_bytes);
align(16);
BIND(L_fill_64_bytes_loop_avx3);
@ -6054,46 +6251,32 @@ void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src,
}
}
// Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
//
// @IntrinsicCandidate
// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeISOArray0(
// byte[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeAsciiArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
// byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,


@ -668,6 +668,8 @@ public:
// method handles (JSR 292)
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
// Debugging
// only if +VerifyOops


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_
const char * Register::RegisterImpl::name() const {
static const char *const names[number_of_registers] = {
#ifdef _LP64
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
#else
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
#endif // _LP64
};
return is_valid() ? names[encoding()] : "noreg";
}
@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
const char* XMMRegister::XMMRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
#ifdef _LP64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
#endif // _LP64
};
return is_valid() ? names[encoding()] : "xnoreg";
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
// The implementation of integer registers for the x86/x64 architectures.
// The implementation of integer registers for the x64 architectures.
class Register {
private:
int _encoding;
@ -44,11 +44,9 @@ private:
public:
inline friend constexpr Register as_Register(int encoding);
enum {
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
};
static const int number_of_registers = 32;
static const int number_of_byte_registers = 32;
static const int max_slots_per_register = 2;
class RegisterImpl: public AbstractRegisterImpl {
friend class Register;
@ -79,11 +77,9 @@ public:
// Actually available GP registers for use, depending on actual CPU capabilities and flags.
static int available_gp_registers() {
#ifdef _LP64
if (!UseAPX) {
return number_of_registers / 2;
}
#endif // _LP64
return number_of_registers;
}
};
@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
constexpr Register rbp = as_Register(5);
constexpr Register rsi = as_Register(6);
constexpr Register rdi = as_Register(7);
#ifdef _LP64
constexpr Register r8 = as_Register( 8);
constexpr Register r9 = as_Register( 9);
constexpr Register r8 = as_Register(8);
constexpr Register r9 = as_Register(9);
constexpr Register r10 = as_Register(10);
constexpr Register r11 = as_Register(11);
constexpr Register r12 = as_Register(12);
@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
constexpr Register r29 = as_Register(29);
constexpr Register r30 = as_Register(30);
constexpr Register r31 = as_Register(31);
#endif // _LP64
// The implementation of x87 floating point registers for the ia32 architecture.
@ -154,10 +148,8 @@ private:
public:
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
enum {
number_of_registers = 8,
max_slots_per_register = 2
};
static const int number_of_registers = 8;
static const int max_slots_per_register = 2;
class FloatRegisterImpl: public AbstractRegisterImpl {
friend class FloatRegister;
@ -217,10 +209,8 @@ private:
public:
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
enum {
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
};
static const int number_of_registers = 32;
static const int max_slots_per_register = 16; // 512-bit
class XMMRegisterImpl: public AbstractRegisterImpl {
friend class XMMRegister;
@ -250,11 +240,9 @@ public:
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {
#ifdef _LP64
if (UseAVX < 3) {
return number_of_registers / 2;
}
#endif // _LP64
return number_of_registers;
}
};
@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
#ifdef _LP64
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
constexpr XMMRegister xmm10 = as_XMMRegister(10);
@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
constexpr XMMRegister xmm29 = as_XMMRegister(29);
constexpr XMMRegister xmm30 = as_XMMRegister(30);
constexpr XMMRegister xmm31 = as_XMMRegister(31);
#endif // _LP64
// The implementation of AVX-512 opmask registers.
@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
max_gpr = Register::number_of_registers * Register::max_slots_per_register,
max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
// x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
// REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
// with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
// added for 32 bit jvm.
number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
1 // eflags
};
static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
1; // eflags
};
template <>


@ -3266,7 +3266,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(rax, recv, rscratch1);
// profile this call
__ profile_virtual_call(rax, rlocals, rdx);
__ profile_virtual_call(rax, rlocals);
// get target Method* & entry point
__ lookup_virtual_method(rax, index, method);
@ -3407,7 +3407,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// profile this call
__ restore_bcp(); // rbcp was destroyed by receiver type check
__ profile_virtual_call(rdx, rbcp, rlocals);
__ profile_virtual_call(rdx, rbcp);
// Get declaring interface class from method, and itable index
__ load_method_holder(rax, rbx);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
regName[i++] = reg->name();
#ifdef AMD64
regName[i++] = reg->name();
#endif // AMD64
reg = reg->successor();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,14 +52,8 @@ inline bool is_KRegister() {
}
inline Register as_Register() {
assert( is_Register(), "must be");
// Yuk
#ifdef AMD64
assert(is_Register(), "must be");
return ::as_Register(value() >> 1);
#else
return ::as_Register(value());
#endif // AMD64
}
inline FloatRegister as_FloatRegister() {
@ -82,9 +76,6 @@ inline KRegister as_KRegister() {
inline bool is_concrete() {
assert(is_reg(), "must be");
#ifndef AMD64
if (is_Register()) return true;
#endif // AMD64
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
int base = value() - ConcreteRegisterImpl::max_fpr;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define CPU_X86_VMREG_X86_INLINE_HPP
inline VMReg Register::RegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
return VMRegImpl::as_VMReg(encoding() << 1);
}
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {


@ -1,5 +1,5 @@
//
// Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -2633,17 +2633,19 @@ bool Matcher::supports_vector_calling_convention(void) {
return EnableVectorSupport;
}
static bool is_ndd_demotable_opr1(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_opr1) != 0);
}
static bool is_ndd_demotable_opr2(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_opr2) != 0);
}
#ifdef ASSERT
static bool is_ndd_demotable(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable) != 0);
}
static bool is_ndd_demotable_commutative(const MachNode* mdef) {
return ((mdef->flags() & Node::PD::Flag_ndd_demotable_commutative) != 0);
}
static bool is_demotion_candidate(const MachNode* mdef) {
return (is_ndd_demotable(mdef) || is_ndd_demotable_commutative(mdef));
return (is_ndd_demotable_opr1(mdef) || is_ndd_demotable_opr2(mdef));
}
#endif
bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
int oper_index) {
@ -2653,8 +2655,8 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
if (mdef->num_opnds() <= oper_index || mdef->operand_index(oper_index) < 0 ||
mdef->in(mdef->operand_index(oper_index)) == nullptr) {
assert(oper_index != 1 || !is_demotion_candidate(mdef), "%s", mdef->Name());
assert(oper_index != 2 || !is_ndd_demotable_commutative(mdef), "%s", mdef->Name());
assert(oper_index != 1 || !is_ndd_demotable_opr1(mdef), "%s", mdef->Name());
assert(oper_index != 2 || !is_ndd_demotable_opr2(mdef), "%s", mdef->Name());
return false;
}
@ -2662,14 +2664,13 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
// address computation. Biasing def towards any address component will not
// result in NDD demotion by assembler.
if (mdef->operand_num_edges(oper_index) != 1) {
assert(!is_ndd_demotable(mdef), "%s", mdef->Name());
return false;
}
// Demotion candidate must be register mask compatible with definition.
const RegMask& oper_mask = mdef->in_RegMask(mdef->operand_index(oper_index));
if (!oper_mask.overlap(mdef->out_RegMask())) {
assert(!is_demotion_candidate(mdef), "%s", mdef->Name());
assert(!is_ndd_demotable(mdef), "%s", mdef->Name());
return false;
}
@ -2681,12 +2682,12 @@ bool Matcher::is_register_biasing_candidate(const MachNode* mdef,
// EVEX prefix with shorter REX/REX2 encoding. Demotion candidates
// are decorated with a special flag by instruction selector.
case 1:
return is_demotion_candidate(mdef);
return is_ndd_demotable_opr1(mdef);
// Definition operand of commutative operation can be biased towards second
// operand.
case 2:
return is_ndd_demotable_commutative(mdef);
return is_ndd_demotable_opr2(mdef);
// Current scheme only selects up to two biasing candidates
default:
@ -2888,9 +2889,9 @@ public:
Flag_clears_zero_flag = Node::_last_flag << 9,
Flag_clears_overflow_flag = Node::_last_flag << 10,
Flag_clears_sign_flag = Node::_last_flag << 11,
Flag_ndd_demotable = Node::_last_flag << 12,
Flag_ndd_demotable_commutative = Node::_last_flag << 13,
_last_flag = Flag_ndd_demotable_commutative
Flag_ndd_demotable_opr1 = Node::_last_flag << 12,
Flag_ndd_demotable_opr2 = Node::_last_flag << 13,
_last_flag = Flag_ndd_demotable_opr2
};
};
@ -9872,7 +9873,7 @@ instruct addI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -9900,7 +9901,7 @@ instruct addI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -9943,7 +9944,7 @@ instruct addI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eaddl $dst, $src1, $src2\t# int ndd" %}
@ -10000,7 +10001,7 @@ instruct incI_rReg_ndd(rRegI dst, rRegI src, immI_1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddI src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "eincl $dst, $src\t# int ndd" %}
ins_encode %{
@ -10055,7 +10056,7 @@ instruct decI_rReg_ndd(rRegI dst, rRegI src, immI_M1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddI src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "edecl $dst, $src\t# int ndd" %}
ins_encode %{
@ -10162,7 +10163,7 @@ instruct addL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AddL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -10190,7 +10191,7 @@ instruct addL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -10233,7 +10234,7 @@ instruct addL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AddL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eaddq $dst, $src1, $src2\t# long ndd" %}
@ -10289,7 +10290,7 @@ instruct incL_rReg_ndd(rRegL dst, rRegI src, immL1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddL src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "eincq $dst, $src\t# long ndd" %}
ins_encode %{
@ -10344,7 +10345,7 @@ instruct decL_rReg_ndd(rRegL dst, rRegL src, immL_M1 val, rFlagsReg cr)
predicate(UseAPX && UseIncDec);
match(Set dst (AddL src val));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "edecq $dst, $src\t# long ndd" %}
ins_encode %{
@ -11059,7 +11060,7 @@ instruct subI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "esubl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -11073,7 +11074,7 @@ instruct subI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "esubl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -11116,7 +11117,7 @@ instruct subI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
ins_cost(150);
format %{ "esubl $dst, $src1, $src2\t# int ndd" %}
@ -11174,7 +11175,7 @@ instruct subL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "esubq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -11188,7 +11189,7 @@ instruct subL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "esubq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -11231,7 +11232,7 @@ instruct subL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (SubL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_carry_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
ins_cost(150);
format %{ "esubq $dst, $src1, $src2\t# long ndd" %}
@ -11303,7 +11304,7 @@ instruct negI_rReg_ndd(rRegI dst, rRegI src, immI_0 zero, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubI zero src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr2);
format %{ "enegl $dst, $src\t# int ndd" %}
ins_encode %{
@ -11331,7 +11332,7 @@ instruct negI_rReg_2_ndd(rRegI dst, rRegI src, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (NegI src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "enegl $dst, $src\t# int ndd" %}
ins_encode %{
@ -11372,7 +11373,7 @@ instruct negL_rReg_ndd(rRegL dst, rRegL src, immL0 zero, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (SubL zero src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr2);
format %{ "enegq $dst, $src\t# long ndd" %}
ins_encode %{
@ -11400,7 +11401,7 @@ instruct negL_rReg_2_ndd(rRegL dst, rRegL src, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (NegL src));
effect(KILL cr);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_overflow_flag, PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_ndd_demotable_opr1);
format %{ "enegq $dst, $src\t# long ndd" %}
ins_encode %{
@ -11445,7 +11446,7 @@ instruct mulI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (MulI src1 src2));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(300);
format %{ "eimull $dst, $src1, $src2\t# int ndd" %}
@ -11487,7 +11488,7 @@ instruct mulI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (MulI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(350);
format %{ "eimull $dst, $src1, $src2\t# int ndd" %}
@ -11539,7 +11540,7 @@ instruct mulL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (MulL src1 src2));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(300);
format %{ "eimulq $dst, $src1, $src2\t# long ndd" %}
@ -11581,7 +11582,7 @@ instruct mulL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (MulL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(350);
format %{ "eimulq $dst, $src1, $src2 \t# long" %}
@ -11856,7 +11857,7 @@ instruct salI_rReg_immI2_ndd(rRegI dst, rRegI src, immI2 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esall $dst, $src, $shift\t# int(ndd)" %}
ins_encode %{
@ -11885,7 +11886,7 @@ instruct salI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esall $dst, $src, $shift\t# int (ndd)" %}
ins_encode %{
@ -11992,7 +11993,7 @@ instruct sarI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (RShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esarl $dst, $src, $shift\t# int (ndd)" %}
ins_encode %{
@ -12099,7 +12100,7 @@ instruct shrI_rReg_imm_ndd(rRegI dst, rRegI src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (URShiftI src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "eshrl $dst, $src, $shift\t # int (ndd)" %}
ins_encode %{
@ -12207,7 +12208,7 @@ instruct salL_rReg_immI2_ndd(rRegL dst, rRegL src, immI2 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esalq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{
@ -12236,7 +12237,7 @@ instruct salL_rReg_imm_ndd(rRegL dst, rRegL src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (LShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esalq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{
@ -12343,7 +12344,7 @@ instruct sarL_rReg_imm_ndd(rRegL dst, rRegL src, immI shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (RShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "esarq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{
@ -12450,7 +12451,7 @@ instruct shrL_rReg_imm_ndd(rRegL dst, rRegL src, immI8 shift, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (URShiftL src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "eshrq $dst, $src, $shift\t# long (ndd)" %}
ins_encode %{
@ -12622,7 +12623,7 @@ instruct rolI_rReg_Var_ndd(rRegI dst, rRegI src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_INT);
match(Set dst (RotateLeft src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "eroll $dst, $src, $shift\t# rotate left (int ndd)" %}
ins_encode %{
@ -12687,7 +12688,7 @@ instruct rorI_rReg_Var_ndd(rRegI dst, rRegI src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_INT);
match(Set dst (RotateRight src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "erorl $dst, $src, $shift\t# rotate right(int ndd)" %}
ins_encode %{
@ -12754,7 +12755,7 @@ instruct rolL_rReg_Var_ndd(rRegL dst, rRegL src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_LONG);
match(Set dst (RotateLeft src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "erolq $dst, $src, $shift\t# rotate left(long ndd)" %}
ins_encode %{
@ -12819,7 +12820,7 @@ instruct rorL_rReg_Var_ndd(rRegL dst, rRegL src, rcx_RegI shift, rFlagsReg cr)
predicate(UseAPX && n->bottom_type()->basic_type() == T_LONG);
match(Set dst (RotateRight src shift));
effect(KILL cr);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "erorq $dst, $src, $shift\t# rotate right(long ndd)" %}
ins_encode %{
@ -12897,7 +12898,7 @@ instruct andI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AndI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eandl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -12990,7 +12991,7 @@ instruct andI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AndI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eandl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -13034,7 +13035,7 @@ instruct andI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AndI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eandl $dst, $src1, $src2\t# int ndd" %}
@ -13234,7 +13235,7 @@ instruct orI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -13263,7 +13264,7 @@ instruct orI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -13277,7 +13278,7 @@ instruct orI_rReg_imm_rReg_ndd(rRegI dst, immI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eorl $dst, $src2, $src1\t# int ndd" %}
ins_encode %{
@ -13321,7 +13322,7 @@ instruct orI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eorl $dst, $src1, $src2\t# int ndd" %}
@ -13397,7 +13398,7 @@ instruct xorI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (XorI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -13423,7 +13424,7 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
%{
match(Set dst (XorI src imm));
predicate(UseAPX);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "enotl $dst, $src" %}
ins_encode %{
@ -13454,7 +13455,7 @@ instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
ins_encode %{
@ -13500,7 +13501,7 @@ instruct xorI_rReg_rReg_mem_ndd(rRegI dst, rRegI src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (XorI src1 (LoadI src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
@ -13579,7 +13580,7 @@ instruct andL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (AndL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -13635,7 +13636,7 @@ instruct andL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AndL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -13679,7 +13680,7 @@ instruct andL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (AndL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eandq $dst, $src1, $src2\t# long ndd" %}
@ -13882,7 +13883,7 @@ instruct orL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -13937,7 +13938,7 @@ instruct orL_rReg_rReg_imm_ndd(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -13951,7 +13952,7 @@ instruct orL_rReg_imm_rReg_ndd(rRegL dst, immL32 src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "eorq $dst, $src2, $src1\t# long ndd" %}
ins_encode %{
@ -13996,7 +13997,7 @@ instruct orL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (OrL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "eorq $dst, $src1, $src2\t# long ndd" %}
@ -14075,7 +14076,7 @@ instruct xorL_rReg_ndd(rRegL dst, rRegL src1, rRegL src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (XorL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -14101,7 +14102,7 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
%{
predicate(UseAPX);
match(Set dst (XorL src imm));
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
format %{ "enotq $dst, $src" %}
ins_encode %{
@ -14132,7 +14133,7 @@ instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
match(Set dst (XorL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1);
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{
@ -14178,7 +14179,7 @@ instruct xorL_rReg_rReg_mem_ndd(rRegL dst, rRegL src1, memory src2, rFlagsReg cr
predicate(UseAPX);
match(Set dst (XorL src1 (LoadL src2)));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_commutative);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag, PD::Flag_ndd_demotable_opr1, PD::Flag_ndd_demotable_opr2);
ins_cost(150);
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
@ -16633,7 +16634,7 @@ instruct minI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2)
predicate(UseAPX);
match(Set dst (MinI src1 src2));
effect(DEF dst, USE src1, USE src2);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
ins_cost(200);
expand %{
@ -16685,7 +16686,7 @@ instruct maxI_rReg_ndd(rRegI dst, rRegI src1, rRegI src2)
predicate(UseAPX);
match(Set dst (MaxI src1 src2));
effect(DEF dst, USE src1, USE src2);
flag(PD::Flag_ndd_demotable);
flag(PD::Flag_ndd_demotable_opr1);
ins_cost(200);
expand %{

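For readers following the flag rename above: judging from this diff, Flag_ndd_demotable_opr1 and Flag_ndd_demotable_opr2 replace the old Flag_ndd_demotable / Flag_ndd_demotable_commutative pair and record which source operand the register allocator may alias with the destination, so that an APX NDD (new data destination) instruction such as "eaddl dst, src1, src2" can be demoted to the shorter legacy two-operand encoding. A minimal sketch of that decision, with made-up names (not HotSpot's matcher code):

// Illustrative only; the flag constants and helper below are hypothetical.
enum NddDemoteFlags { ndd_demotable_opr1 = 1, ndd_demotable_opr2 = 2 };

// May "op dst, src1, src2" be emitted as the legacy two-operand form?
static bool can_demote_to_legacy(int flags, int dst, int src1, int src2) {
  if ((flags & ndd_demotable_opr1) != 0 && dst == src1) {
    return true;   // dst already holds src1; "op dst, src2" is equivalent
  }
  if ((flags & ndd_demotable_opr2) != 0 && dst == src2) {
    return true;   // dst already holds src2; only valid when operand order tolerates it
  }
  return false;    // keep the explicit three-operand NDD encoding
}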
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,15 +23,15 @@
*
*/
#include "cppstdlib/cstdlib.hpp"
#include "libodm_aix.hpp"
#include "misc_aix.hpp"
#include <stdlib.h>
#include <dlfcn.h>
#include <string.h>
#include "runtime/arguments.hpp"
#include "runtime/os.hpp"
#include "utilities/permitForbiddenFunctions.hpp"
#include <dlfcn.h>
#include <string.h>
dynamicOdm::dynamicOdm() {
const char* libodmname = "/usr/lib/libodm.a(shr_64.o)";

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2022, IBM Corp.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -32,8 +32,9 @@
#ifndef OS_AIX_LIBPERFSTAT_AIX_HPP
#define OS_AIX_LIBPERFSTAT_AIX_HPP
#include "cppstdlib/cstdlib.hpp"
#include <sys/types.h>
#include <stdlib.h>
///////////////////////////////////////////////////////////////////////////////////////////////
// These are excerpts from the AIX 7.1 libperfstat.h -

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, IBM Corp.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,6 +23,7 @@
*
*/
#include "cppstdlib/cstdlib.hpp"
#include "jvm.h"
#include "libperfstat_aix.hpp"
#include "memory/allocation.inline.hpp"
@ -46,7 +47,6 @@
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
typedef struct {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,6 +25,7 @@
#if defined(__APPLE__)
#include "cppstdlib/cstdlib.hpp"
#include "nmt/memMapPrinter.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
@ -34,7 +35,6 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <libproc.h>
#include <unistd.h>

View File

@ -66,9 +66,6 @@
#endif
// open(2) flags
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,6 +27,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "hugepages.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@ -96,7 +97,6 @@
# include <signal.h>
# include <stdint.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <sys/ioctl.h>
# include <sys/ipc.h>
@ -4878,31 +4878,8 @@ int os::open(const char *path, int oflag, int mode) {
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
// might fork and exec without closing all appropriate file descriptors,
// and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or
//
// - might cause an fopen in the subprocess to fail on a system
// suffering from bug 1085341.
//
// (Yes, the default setting of the close-on-exec flag is a Unix
// design flaw)
//
// See:
// 1085341: 32-bit stdio routines should support file descriptors >255
// 4843136: (process) pipe file descriptor from Runtime.exec not being closed
// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
//
// Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
// O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
// because it saves a system call and removes a small window where the flag
// is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
// and we fall back to using FD_CLOEXEC (see below).
#ifdef O_CLOEXEC
// might fork and exec without closing all appropriate file descriptors.
oflag |= O_CLOEXEC;
#endif
int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
@ -4925,21 +4902,6 @@ int os::open(const char *path, int oflag, int mode) {
}
}
#ifdef FD_CLOEXEC
// Validate that the use of the O_CLOEXEC flag on open above worked.
// With recent kernels, we will perform this check exactly once.
static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
if (!O_CLOEXEC_is_known_to_work) {
int flags = ::fcntl(fd, F_GETFD);
if (flags != -1) {
if ((flags & FD_CLOEXEC) != 0)
O_CLOEXEC_is_known_to_work = 1;
else
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
}
#endif
return fd;
}
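The simplification above drops the runtime FD_CLOEXEC re-check, presumably because all Linux kernels the JDK still supports honor O_CLOEXEC at open() time. A minimal sketch of the remaining pattern, assuming O_CLOEXEC is always defined (the wrapper name is illustrative, not the real os::open):

#include <fcntl.h>

// Set close-on-exec atomically at open time so a concurrent fork()+exec()
// in third-party native code cannot inherit the descriptor.
static int open_cloexec(const char* path, int oflag, int mode) {
  return ::open(path, oflag | O_CLOEXEC, mode);
}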

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,6 +22,7 @@
*
*/
#include "cppstdlib/cstdlib.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "os_linux.inline.hpp"
@ -40,7 +41,6 @@
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/stat.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,15 +36,12 @@
#include <unistd.h>
#endif
// POSIX puts _exit in <unistd.h>.
FORBID_IMPORTED_NORETURN_C_FUNCTION(void _exit(int), /* not noexcept */, "use os::exit");
// If needed, add os::strndup and use that instead.
FORBID_C_FUNCTION(char* strndup(const char*, size_t), noexcept, "don't use");
// These are unimplementable for Windows, and they aren't useful for a
// POSIX implementation of NMT either.
// https://stackoverflow.com/questions/62962839/stdaligned-alloc-missing-from-visual-studio-2019
FORBID_C_FUNCTION(int posix_memalign(void**, size_t, size_t), noexcept, "don't use");
FORBID_C_FUNCTION(void* aligned_alloc(size_t, size_t), noexcept, "don't use");
// realpath with a null second argument mallocs a string for the result.
// With a non-null second argument, there is a risk of buffer overrun.
PRAGMA_DIAG_PUSH

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "classfile/classLoader.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmti.h"
@ -457,12 +458,10 @@ char* os::map_memory_to_file(char* base, size_t size, int fd) {
warning("Failed mmap to file. (%s)", os::strerror(errno));
return nullptr;
}
if (base != nullptr && addr != base) {
if (!os::release_memory(addr, size)) {
warning("Could not release memory on unsuccessful file mapping");
}
return nullptr;
}
// The requested address should be the same as the returned address when using MAP_FIXED
// as per POSIX.
assert(base == nullptr || addr == base, "base should equal addr when using MAP_FIXED");
return addr;
}
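The deleted release-and-fail path guarded against mmap placing the mapping somewhere other than the requested base; with MAP_FIXED that cannot happen on success, which is what the new assert documents. A rough sketch of the contract being relied on (illustrative names, not the HotSpot function):

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

static char* map_at(char* base, size_t size, int fd) {
  const int flags = MAP_SHARED | (base != nullptr ? MAP_FIXED : 0);
  void* addr = ::mmap(base, size, PROT_READ | PROT_WRITE, flags, fd, 0);
  if (addr == MAP_FAILED) {
    return nullptr;  // mapping failed outright
  }
  // POSIX: a successful MAP_FIXED mapping is placed exactly at 'base'.
  assert(base == nullptr || addr == base, "MAP_FIXED must honor the request");
  return static_cast<char*>(addr);
}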

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -946,7 +946,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
if (result == -1 ) break;
if (!os::write(fd, &zero_int, 1)) {
if (errno == ENOSPC) {
warning("Insufficient space for shared memory file:\n %s\nTry using the -Djava.io.tmpdir= option to select an alternate temp location.\n", filename);
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,15 +25,20 @@
#ifndef OS_POSIX_PERMITFORBIDDENFUNCTIONS_POSIX_HPP
#define OS_POSIX_PERMITFORBIDDENFUNCTIONS_POSIX_HPP
#include "cppstdlib/cstdlib.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/globalDefinitions.hpp"
#include <unistd.h>
// Provide wrappers for some functions otherwise forbidden from use in HotSpot.
// See forbiddenFunctions.hpp for details.
namespace permit_forbidden_function {
BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
[[noreturn]] inline void _exit(int status) { ::_exit(status); }
// Used by the POSIX implementation of os::realpath.
inline char* realpath(const char* path, char* resolved_path) {
return ::realpath(path, resolved_path);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -951,6 +951,32 @@ struct enum_sigcode_desc_t {
const char* s_desc;
};
#if defined(LINUX)
// Additional kernel si_code definitions that are only exported by newer
// glibc versions, so we hard-code the values here.
#ifndef BUS_MCEERR_AR // glibc 2.17
#define BUS_MCEERR_AR 4
#define BUS_MCEERR_AO 5
#endif
#ifndef SEGV_PKUERR // glibc 2.27
#define SEGV_PKUERR 4
#endif
#ifndef SYS_SECCOMP // glibc 2.28
#define SYS_SECCOMP 1
#endif
#ifndef TRAP_BRANCH // glibc 2.30
#define TRAP_BRANCH 3
#endif
#ifndef TRAP_HWBKPT // not glibc version specific - gdb related
#define TRAP_HWBKPT 4
#endif
#endif // LINUX
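The values hard-coded above are stable kernel ABI constants, so defining them when older glibc headers omit them keeps the description table below complete on every build host. A minimal sketch of the (signal, si_code) lookup that get_signal_code_description performs, with hypothetical names:

#include <signal.h>
#include <cstddef>

struct SigCodeDesc { int sig; int code; const char* name; };

static const SigCodeDesc k_sigcodes[] = {
  { SIGSEGV, SEGV_MAPERR, "SEGV_MAPERR" },
  { SIGBUS,  BUS_ADRALN,  "BUS_ADRALN"  },
  // Entries guarded by the fallback #defines compile everywhere and still
  // match the numeric si_code values newer kernels deliver at run time.
};

static const char* describe_si_code(const siginfo_t* si) {
  for (size_t i = 0; i < sizeof(k_sigcodes) / sizeof(k_sigcodes[0]); i++) {
    if (k_sigcodes[i].sig == si->si_signo && k_sigcodes[i].code == si->si_code) {
      return k_sigcodes[i].name;
    }
  }
  return "unknown si_code";
}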
static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
const struct {
@ -976,6 +1002,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
#if defined(LINUX)
{ SIGSEGV, SEGV_BNDERR, "SEGV_BNDERR", "Failed address bound checks." },
{ SIGSEGV, SEGV_PKUERR, "SEGV_PKUERR", "Protection key checking failure." },
#endif
#if defined(AIX)
// no explanation found what keyerr would be
@ -984,8 +1011,18 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
{ SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
{ SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
#if defined(LINUX)
{ SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","Hardware memory error consumed on a machine check: action required." },
{ SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","Hardware memory error detected in process but not consumed: action optional." },
{ SIGSYS, SYS_SECCOMP, "SYS_SECCOMP", "Secure computing (seccomp) filter failure." },
#endif
{ SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
{ SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
#if defined(LINUX)
{ SIGTRAP, TRAP_BRANCH, "TRAP_BRANCH", "Process taken branch trap." },
{ SIGTRAP, TRAP_HWBKPT, "TRAP_HWBKPT", "Hardware breakpoint/watchpoint." },
#endif
{ SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
{ SIGCHLD, CLD_KILLED, "CLD_KILLED", "Child has terminated abnormally and did not create a core file." },
{ SIGCHLD, CLD_DUMPED, "CLD_DUMPED", "Child has terminated abnormally and created a core file." },
@ -993,6 +1030,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGCHLD, CLD_STOPPED, "CLD_STOPPED", "Child has stopped." },
{ SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
#ifdef SIGPOLL
{ SIGPOLL, POLL_IN, "POLL_IN", "Data input available." },
{ SIGPOLL, POLL_OUT, "POLL_OUT", "Output buffers available." },
{ SIGPOLL, POLL_MSG, "POLL_MSG", "Input message available." },
{ SIGPOLL, POLL_ERR, "POLL_ERR", "I/O error." },

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmti.h"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef OS_WINDOWS_PERMITFORBIDDENFUNCTIONS_WINDOWS_HPP
#define OS_WINDOWS_PERMITFORBIDDENFUNCTIONS_WINDOWS_HPP
#include "cppstdlib/cstdlib.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/globalDefinitions.hpp"
@ -34,6 +35,8 @@
namespace permit_forbidden_function {
BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
[[noreturn]] inline void _exit(int status) { ::_exit(status); }
// Used by the Windows implementation of os::realpath.
inline char* _fullpath(char* absPath, const char* relPath, size_t maxLength) {
return ::_fullpath(absPath, relPath, maxLength);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#include "cds/aotMetaspace.hpp"
#include "cds/cdsConfig.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "runtime/arguments.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,8 +26,7 @@
#ifndef OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -29,6 +29,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "logging/log.hpp"
@ -63,7 +64,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -27,8 +27,7 @@
#ifndef OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP
#define OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
if (interval >= 0)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "logging/log.hpp"
@ -58,7 +59,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,7 @@
#ifndef OS_CPU_BSD_X86_PREFETCH_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_PREFETCH_BSD_X86_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
__asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#ifndef OS_CPU_BSD_ZERO_PREFETCH_BSD_ZERO_INLINE_HPP
#define OS_CPU_BSD_ZERO_PREFETCH_BSD_ZERO_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void* loc, intx interval) {
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -59,7 +60,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,8 +26,7 @@
#ifndef OS_CPU_LINUX_AARCH64_PREFETCH_LINUX_AARCH64_INLINE_HPP
#define OS_CPU_LINUX_AARCH64_PREFETCH_LINUX_AARCH64_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
if (interval >= 0)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -57,7 +58,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef OS_CPU_LINUX_ARM_PREFETCH_LINUX_ARM_INLINE_HPP
#define OS_CPU_LINUX_ARM_PREFETCH_LINUX_ARM_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_5TE__)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -62,7 +63,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,8 +26,7 @@
#ifndef OS_CPU_LINUX_PPC_PREFETCH_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_PREFETCH_LINUX_PPC_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
__asm__ __volatile__ (

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -61,7 +62,6 @@
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/socket.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#ifndef OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP
#define OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
if (interval >= 0 && UseZicbop) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -30,6 +30,7 @@
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -62,7 +63,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#ifndef OS_CPU_LINUX_S390_PREFETCH_LINUX_S390_INLINE_HPP
#define OS_CPU_LINUX_S390_PREFETCH_LINUX_S390_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void* loc, intx interval) {
// No prefetch instructions on z/Architecture -> implement trivially.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "logging/log.hpp"
@ -59,7 +60,6 @@
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,7 @@
#ifndef OS_CPU_LINUX_X86_PREFETCH_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_PREFETCH_LINUX_X86_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
__asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#ifndef OS_CPU_LINUX_ZERO_PREFETCH_LINUX_ZERO_INLINE_HPP
#define OS_CPU_LINUX_ZERO_PREFETCH_LINUX_ZERO_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void* loc, intx interval) {
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
@ -54,7 +55,6 @@
# include <sys/types.h>
# include <signal.h>
# include <errno.h>
# include <stdlib.h>
# include <stdio.h>
# include <intrin.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* Copyright (c) 2020, 2026, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,7 @@
#ifndef OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP
#define OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef OS_CPU_WINDOWS_X86_PREFETCH_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_PREFETCH_WINDOWS_X86_INLINE_HPP
#include "runtime/prefetch.hpp"
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read (const void *loc, intx interval) {}
inline void Prefetch::write(void *loc, intx interval) {}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -884,7 +884,6 @@ class LinearScanStatistic : public StackObj {
counter_throw,
counter_unwind,
counter_typecheck,
counter_fpu_stack,
counter_misc_inst,
counter_other_inst,
blank_line_2,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -116,6 +116,10 @@ bool AOTConstantPoolResolver::is_class_resolution_deterministic(InstanceKlass* c
return false;
}
} else if (resolved_class->is_objArray_klass()) {
if (CDSConfig::is_dumping_dynamic_archive()) {
// This is difficult to handle. See JDK-8374639
return false;
}
Klass* elem = ObjArrayKlass::cast(resolved_class)->bottom_klass();
if (elem->is_instance_klass()) {
return is_class_resolution_deterministic(cp_holder, InstanceKlass::cast(elem));

View File

@ -696,7 +696,7 @@ template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* fiel
// We use zero-based, 0-shift encoding, so the narrowOop is just the lower
// 32 bits of request_referent
intptr_t addr = cast_from_oop<intptr_t>(request_referent);
*((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
*((narrowOop*)field_addr_in_buffer) = CompressedOops::narrow_oop_cast(addr);
} else {
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
}
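As the comment above notes, with zero-based, zero-shift compressed oops the narrow value is simply the low 32 bits of the address, so a plain narrowing cast (now spelled CompressedOops::narrow_oop_cast) is sufficient. A tiny sketch of that encoding under the stated assumptions (base == 0, shift == 0; names are illustrative, not the CompressedOops API):

#include <cstdint>

static uint32_t narrow_zero_based(uintptr_t addr) {
  return static_cast<uint32_t>(addr);    // base == 0, shift == 0: just truncate
}

static uintptr_t widen_zero_based(uint32_t narrow) {
  return static_cast<uintptr_t>(narrow); // zero-extension restores the address
}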

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2159,7 +2159,6 @@ void AOTMetaspace::initialize_shared_spaces() {
intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
ReadClosure rc(&buffer, (intptr_t)SharedBaseAddress);
DynamicArchive::serialize(&rc);
DynamicArchive::setup_array_klasses();
}
LogStreamHandle(Info, aot) lsh;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -984,8 +984,6 @@ void ArchiveBuilder::make_klasses_shareable() {
#undef STATS_FORMAT
#undef STATS_PARAMS
DynamicArchive::make_array_klasses_shareable();
}
void ArchiveBuilder::make_training_data_shareable() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/os.hpp"
@ -95,7 +96,6 @@ public:
void sort_methods(InstanceKlass* ik) const;
void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
void write_archive(char* serialized_data, AOTClassLocationConfig* cl_config);
void gather_array_klasses();
public:
// Do this before and after the archive dump to see if any corruption
@ -132,7 +132,6 @@ public:
init_header();
gather_source_objs();
gather_array_klasses();
reserve_buffer();
log_info(cds, dynamic)("Copying %d klasses and %d symbols",
@ -159,7 +158,6 @@ public:
ArchiveBuilder::OtherROAllocMark mark;
SystemDictionaryShared::write_to_archive(false);
cl_config = AOTClassLocationConfig::dumptime()->write_to_archive();
DynamicArchive::dump_array_klasses();
serialized_data = ro_region()->top();
WriteClosure wc(ro_region());
@ -175,8 +173,6 @@ public:
write_archive(serialized_data, cl_config);
release_header();
DynamicArchive::post_dump();
post_dump();
verify_universe("After CDS dynamic dump");
@ -185,30 +181,6 @@ public:
virtual void iterate_roots(MetaspaceClosure* it) {
AOTArtifactFinder::all_cached_classes_do(it);
SystemDictionaryShared::dumptime_classes_do(it);
iterate_primitive_array_klasses(it);
}
void iterate_primitive_array_klasses(MetaspaceClosure* it) {
for (int i = T_BOOLEAN; i <= T_LONG; i++) {
assert(is_java_primitive((BasicType)i), "sanity");
Klass* k = Universe::typeArrayKlass((BasicType)i); // this gives you "[I", etc.
assert(AOTMetaspace::in_aot_cache_static_region((void*)k),
"one-dimensional primitive array should be in static archive");
ArrayKlass* ak = ArrayKlass::cast(k);
while (ak != nullptr && ak->in_aot_cache()) {
Klass* next_k = ak->array_klass_or_null();
if (next_k != nullptr) {
ak = ArrayKlass::cast(next_k);
} else {
ak = nullptr;
}
}
if (ak != nullptr) {
assert(ak->dimension() > 1, "sanity");
// this is the lowest dimension that's not in the static archive
it->push(&ak);
}
}
}
};
@ -367,26 +339,6 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
log_info(cds, dynamic)("%d klasses; %d symbols", klasses()->length(), symbols()->length());
}
void DynamicArchiveBuilder::gather_array_klasses() {
for (int i = 0; i < klasses()->length(); i++) {
if (klasses()->at(i)->is_objArray_klass()) {
ObjArrayKlass* oak = ObjArrayKlass::cast(klasses()->at(i));
Klass* elem = oak->element_klass();
if (AOTMetaspace::in_aot_cache_static_region(elem)) {
// Only capture the array klass whose element_klass is in the static archive.
// During run time, setup (see DynamicArchive::setup_array_klasses()) is needed
// so that the element_klass can find its array klasses from the dynamic archive.
DynamicArchive::append_array_klass(oak);
} else {
// The element_klass and its array klasses are in the same archive.
assert(!AOTMetaspace::in_aot_cache_static_region(oak),
"we should not gather klasses that are already in the static archive");
}
}
}
log_debug(aot)("Total array klasses gathered for dynamic archive: %d", DynamicArchive::num_array_klasses());
}
class VM_PopulateDynamicDumpSharedSpace: public VM_Heap_Sync_Operation {
DynamicArchiveBuilder _builder;
public:
@ -403,76 +355,9 @@ public:
}
};
// _array_klasses and _dynamic_archive_array_klasses only hold the array klasses
// which have element klass in the static archive.
GrowableArray<ObjArrayKlass*>* DynamicArchive::_array_klasses = nullptr;
Array<ObjArrayKlass*>* DynamicArchive::_dynamic_archive_array_klasses = nullptr;
void DynamicArchive::serialize(SerializeClosure* soc) {
SymbolTable::serialize_shared_table_header(soc, false);
SystemDictionaryShared::serialize_dictionary_headers(soc, false);
soc->do_ptr(&_dynamic_archive_array_klasses);
}
void DynamicArchive::append_array_klass(ObjArrayKlass* ak) {
if (_array_klasses == nullptr) {
_array_klasses = new (mtClassShared) GrowableArray<ObjArrayKlass*>(50, mtClassShared);
}
_array_klasses->append(ak);
}
void DynamicArchive::dump_array_klasses() {
assert(CDSConfig::is_dumping_dynamic_archive(), "sanity");
if (_array_klasses != nullptr) {
ArchiveBuilder* builder = ArchiveBuilder::current();
int num_array_klasses = _array_klasses->length();
_dynamic_archive_array_klasses =
ArchiveBuilder::new_ro_array<ObjArrayKlass*>(num_array_klasses);
for (int i = 0; i < num_array_klasses; i++) {
builder->write_pointer_in_buffer(_dynamic_archive_array_klasses->adr_at(i), _array_klasses->at(i));
}
}
}
void DynamicArchive::setup_array_klasses() {
if (_dynamic_archive_array_klasses != nullptr) {
for (int i = 0; i < _dynamic_archive_array_klasses->length(); i++) {
ObjArrayKlass* oak = _dynamic_archive_array_klasses->at(i);
Klass* elm = oak->element_klass();
assert(AOTMetaspace::in_aot_cache_static_region((void*)elm), "must be");
if (elm->is_instance_klass()) {
assert(InstanceKlass::cast(elm)->array_klasses() == nullptr, "must be");
InstanceKlass::cast(elm)->set_array_klasses(oak);
} else {
assert(elm->is_array_klass(), "sanity");
assert(ArrayKlass::cast(elm)->higher_dimension() == nullptr, "must be");
ArrayKlass::cast(elm)->set_higher_dimension(oak);
}
}
log_debug(aot)("Total array klasses read from dynamic archive: %d", _dynamic_archive_array_klasses->length());
}
}
void DynamicArchive::make_array_klasses_shareable() {
if (_array_klasses != nullptr) {
int num_array_klasses = _array_klasses->length();
for (int i = 0; i < num_array_klasses; i++) {
ObjArrayKlass* k = ArchiveBuilder::current()->get_buffered_addr(_array_klasses->at(i));
k->remove_unshareable_info();
}
}
}
void DynamicArchive::post_dump() {
if (_array_klasses != nullptr) {
delete _array_klasses;
_array_klasses = nullptr;
}
}
int DynamicArchive::num_array_klasses() {
return _array_klasses != nullptr ? _array_klasses->length() : 0;
}
void DynamicArchive::dump_impl(bool jcmd_request, const char* archive_name, TRAPS) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,13 +26,8 @@
#define SHARE_CDS_DYNAMICARCHIVE_HPP
#include "cds/filemap.hpp"
#include "classfile/compactHashtable.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "oops/array.hpp"
#include "oops/oop.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_CDS
@ -59,22 +54,13 @@ public:
};
class DynamicArchive : AllStatic {
private:
static GrowableArray<ObjArrayKlass*>* _array_klasses;
static Array<ObjArrayKlass*>* _dynamic_archive_array_klasses;
public:
static void dump_for_jcmd(const char* archive_name, TRAPS);
static void dump_at_exit(JavaThread* current);
static void dump_impl(bool jcmd_request, const char* archive_name, TRAPS);
static bool is_mapped() { return FileMapInfo::dynamic_info() != nullptr; }
static bool validate(FileMapInfo* dynamic_info);
static void dump_array_klasses();
static void setup_array_klasses();
static void append_array_klass(ObjArrayKlass* oak);
static void serialize(SerializeClosure* soc);
static void make_array_klasses_shareable();
static void post_dump();
static int num_array_klasses();
};
#endif // INCLUDE_CDS
#endif // SHARE_CDS_DYNAMICARCHIVE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,6 +42,7 @@
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "cppstdlib/cstdlib.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jimage.hpp"
@ -81,7 +82,6 @@
#include "utilities/utf8.hpp"
#include <ctype.h>
#include <stdlib.h>
// Entry point in java.dll for path canonicalization

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -4297,10 +4297,6 @@ int jdk_internal_foreign_abi_NativeEntryPoint::_downcall_stub_address_offset;
macro(_method_type_offset, k, "methodType", java_lang_invoke_MethodType_signature, false); \
macro(_downcall_stub_address_offset, k, "downcallStubAddress", long_signature, false);
bool jdk_internal_foreign_abi_NativeEntryPoint::is_instance(oop obj) {
return obj != nullptr && is_subclass(obj->klass());
}
void jdk_internal_foreign_abi_NativeEntryPoint::compute_offsets() {
InstanceKlass* k = vmClasses::NativeEntryPoint_klass();
NEP_FIELDS_DO(FIELD_COMPUTE_OFFSET);
@ -4337,10 +4333,6 @@ int jdk_internal_foreign_abi_ABIDescriptor::_scratch2_offset;
macro(_scratch1_offset, k, "scratch1", jdk_internal_foreign_abi_VMStorage_signature, false); \
macro(_scratch2_offset, k, "scratch2", jdk_internal_foreign_abi_VMStorage_signature, false);
bool jdk_internal_foreign_abi_ABIDescriptor::is_instance(oop obj) {
return obj != nullptr && is_subclass(obj->klass());
}
void jdk_internal_foreign_abi_ABIDescriptor::compute_offsets() {
InstanceKlass* k = vmClasses::ABIDescriptor_klass();
ABIDescriptor_FIELDS_DO(FIELD_COMPUTE_OFFSET);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1179,13 +1179,6 @@ class jdk_internal_foreign_abi_NativeEntryPoint: AllStatic {
static oop method_type(oop entry);
static jlong downcall_stub_address(oop entry);
// Testers
static bool is_subclass(Klass* klass) {
return vmClasses::NativeEntryPoint_klass() != nullptr &&
klass->is_subclass_of(vmClasses::NativeEntryPoint_klass());
}
static bool is_instance(oop obj);
// Accessors for code generation:
static int method_type_offset_in_bytes() { return _method_type_offset; }
static int downcall_stub_address_offset_in_bytes() { return _downcall_stub_address_offset; }
@ -1216,13 +1209,6 @@ class jdk_internal_foreign_abi_ABIDescriptor: AllStatic {
static jint shadowSpace(oop entry);
static oop scratch1(oop entry);
static oop scratch2(oop entry);
// Testers
static bool is_subclass(Klass* klass) {
return vmClasses::ABIDescriptor_klass() != nullptr &&
klass->is_subclass_of(vmClasses::ABIDescriptor_klass());
}
static bool is_instance(oop obj);
};
class jdk_internal_foreign_abi_VMStorage: AllStatic {

View File

@ -415,18 +415,18 @@ class methodHandle;
\
do_class(java_lang_StringCoding, "java/lang/StringCoding") \
do_intrinsic(_countPositives, java_lang_StringCoding, countPositives_name, countPositives_signature, F_S) \
do_name( countPositives_name, "countPositives0") \
do_name( countPositives_name, "countPositives") \
do_signature(countPositives_signature, "([BII)I") \
\
do_class(sun_nio_cs_iso8859_1_Encoder, "sun/nio/cs/ISO_8859_1$Encoder") \
do_intrinsic(_encodeISOArray, sun_nio_cs_iso8859_1_Encoder, encodeISOArray_name, encodeISOArray_signature, F_S) \
do_name( encodeISOArray_name, "encodeISOArray0") \
do_name( encodeISOArray_name, "implEncodeISOArray") \
do_signature(encodeISOArray_signature, "([CI[BII)I") \
\
do_intrinsic(_encodeByteISOArray, java_lang_StringCoding, encodeISOArray_name, indexOfI_signature, F_S) \
\
do_intrinsic(_encodeAsciiArray, java_lang_StringCoding, encodeAsciiArray_name, encodeISOArray_signature, F_S) \
do_name( encodeAsciiArray_name, "encodeAsciiArray0") \
do_name( encodeAsciiArray_name, "implEncodeAsciiArray") \
\
do_class(java_math_BigInteger, "java/math/BigInteger") \
do_intrinsic(_multiplyToLen, java_math_BigInteger, multiplyToLen_name, multiplyToLen_signature, F_S) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1371,6 +1371,7 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
SET_ADDRESS(_extrs, &ZPointerLoadShift);

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CPPSTDLIB_CSTDLIB_HPP
#define SHARE_CPPSTDLIB_CSTDLIB_HPP
#include "utilities/compilerWarnings.hpp"
// HotSpot usage for <cstdlib>:
//
// Some functions are explicitly forbidden below. That may not be a complete
// list of all the functions we should forbid.
//
// We assume <cstdlib> provides definitions in the global namespace, in
// addition to providing them in the std namespace. We prefer to use the names
// in the global namespace.
BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
#include "utilities/vmassert_uninstall.hpp"
#include <cstdlib>
#include "utilities/vmassert_reinstall.hpp" // don't reorder
END_ALLOW_FORBIDDEN_FUNCTIONS
// AIX may define malloc and calloc as macros when certain other features are
// present, causing us all sorts of grief.
// https://www.ibm.com/docs/en/openxl-c-and-cpp-aix/17.1.4?topic=compilers-memory-allocation
// Replace the macro definitions with something we can work with.
// AIX 7.3 no longer uses macro renaming when building with clang, instead
// using the same asm replacement approach as used below. This workaround can
// be removed once earlier versions are no longer supported as build platforms.
#if defined(AIX) && (defined(__VEC__) || defined(__AIXVEC))
#if defined(malloc) || defined(calloc)
#if !defined(malloc) || !defined(calloc)
#error "Inconsistent alloc macro mappings, expected both to be mapped."
#endif
// Remove the macros.
#undef malloc
#undef calloc
// Implement the mapping using gcc/clang asm name mapping.
extern "C" {
extern void* malloc(size_t) noexcept asm("vec_malloc");
extern void* calloc(size_t, size_t) noexcept asm("vec_calloc");
} // extern "C"
// Because the macros are in place when <cstdlib> brings names into the std
// namespace, macro replacement causes the expanded names to be added instead
// of the intended names. We can't remove std::vec_malloc and std::vec_calloc,
// but we do add the standard names in case someone uses them.
namespace std {
using ::malloc;
using ::calloc;
} // namespace std
#endif // Macro definition for malloc or calloc
#endif // AIX altivec allocator support
// Prefer os:: variants of these.
FORBID_IMPORTED_NORETURN_C_FUNCTION(void exit(int), noexcept, "use os::exit")
FORBID_IMPORTED_NORETURN_C_FUNCTION(void _Exit(int), noexcept, "use os::exit")
// Windows puts _exit in <stdlib.h>. POSIX puts it in <unistd.h>.
// We can't forbid it here when using clang if it's not in <stdlib.h> - see
// the clang definition for FORBIDDEN_FUNCTION_NORETURN_ATTRIBUTE.
#ifdef _WINDOWS
FORBID_IMPORTED_NORETURN_C_FUNCTION(void _exit(int), /* not noexcept */, "use os::exit")
#endif // _WINDOWS
// These functions return raw C-heap pointers or, in case of free(), take raw
// C-heap pointers. We generally want allocation to be done through NMT, using
// os::malloc and friends.
FORBID_IMPORTED_C_FUNCTION(void* malloc(size_t), noexcept, "use os::malloc");
FORBID_IMPORTED_C_FUNCTION(void free(void*), noexcept, "use os::free");
FORBID_IMPORTED_C_FUNCTION(void* calloc(size_t, size_t), noexcept, "use os::malloc and zero out manually");
FORBID_IMPORTED_C_FUNCTION(void* realloc(void*, size_t), noexcept, "use os::realloc");
// These are not provided (and are unimplementable?) by Windows.
// https://stackoverflow.com/questions/62962839/stdaligned-alloc-missing-from-visual-studio-2019
// They also aren't useful for a POSIX implementation of NMT.
#ifndef _WINDOWS
FORBID_C_FUNCTION(void* aligned_alloc(size_t, size_t), noexcept, "don't use");
FORBID_C_FUNCTION(int posix_memalign(void**, size_t, size_t), noexcept, "don't use");
#endif // !_WINDOWS
#endif // SHARE_CPPSTDLIB_CSTDLIB_HPP
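
The new cppstdlib/cstdlib.hpp above uses the GCC/Clang asm-name extension to rebind malloc and calloc to the AIX vec_* symbols, and then forbids the raw C allocation functions in favor of the os:: wrappers. The asm-name rebinding on its own looks roughly like the sketch below, assuming GCC or Clang on a Linux-style target with unmangled C symbol names; fast_square and square_impl are invented names for this sketch, and the FORBID_* macros themselves are HotSpot-internal, so they are not reproduced here.

#include <cstdio>

// A declaration can be bound to a different link-time symbol with the GCC/Clang
// asm-name extension; this is the mechanism the AIX workaround above uses to
// map malloc -> vec_malloc and calloc -> vec_calloc.
extern "C" int fast_square(int) asm("square_impl");

// The definition is emitted under the symbol name the declaration points at.
extern "C" int square_impl(int x) { return x * x; }

int main() {
  // The call to fast_square resolves to the square_impl symbol.
  std::printf("%d\n", fast_square(7));   // prints 49
  return 0;
}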

View File

@ -62,8 +62,6 @@ jint EpsilonHeap::initialize() {
// Enable monitoring
_monitoring_support = new EpsilonMonitoringSupport(this);
_last_counter_update = 0;
_last_heap_print = 0;
// Install barrier set
BarrierSet::set_barrier_set(new EpsilonBarrierSet());
@ -77,6 +75,7 @@ jint EpsilonHeap::initialize() {
void EpsilonHeap::initialize_serviceability() {
_pool = new EpsilonMemoryPool(this);
_memory_manager.add_pool(_pool);
_monitoring_support->mark_ready();
}
GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
@ -101,7 +100,7 @@ EpsilonHeap* EpsilonHeap::heap() {
return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
HeapWord* EpsilonHeap::allocate_work(size_t size) {
assert(is_object_aligned(size), "Allocation size should be aligned: %zu", size);
HeapWord* res = nullptr;
@ -151,19 +150,23 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
size_t used = _space->used();
// Allocation successful, update counters
if (verbose) {
size_t last = _last_counter_update;
if ((used - last >= _step_counter_update) && AtomicAccess::cmpxchg(&_last_counter_update, last, used) == last) {
// Allocation successful, update counters and print status.
// At this point, some diagnostic subsystems might not yet be initialized.
// We pretend the printout happened either way. This keeps the allocation path
// from obsessively checking the subsystems' status on every allocation.
size_t last_counter = _last_counter_update.load_relaxed();
if ((used - last_counter >= _step_counter_update) &&
_last_counter_update.compare_set(last_counter, used)) {
if (_monitoring_support->is_ready()) {
_monitoring_support->update_counters();
}
}
// ...and print the occupancy line, if needed
if (verbose) {
size_t last = _last_heap_print;
if ((used - last >= _step_heap_print) && AtomicAccess::cmpxchg(&_last_heap_print, last, used) == last) {
print_heap_info(used);
size_t last_heap = _last_heap_print.load_relaxed();
if ((used - last_heap >= _step_heap_print) &&
_last_heap_print.compare_set(last_heap, used)) {
print_heap_info(used);
if (Metaspace::initialized()) {
print_metaspace_info();
}
}
@ -265,8 +268,7 @@ HeapWord* EpsilonHeap::mem_allocate(size_t size) {
}
HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
// Cannot use verbose=true because Metaspace is not initialized
return allocate_work(size, /* verbose = */false);
return allocate_work(size);
}
void EpsilonHeap::collect(GCCause::Cause cause) {
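
In the allocate_work hunk above, the rate limiting now reads the last published value with load_relaxed and lets exactly one thread advance it with compare_set once usage has grown by a configured step. A small standalone sketch of the same idiom with std::atomic; last_published, step and maybe_publish are made-up names for this sketch, not Epsilon's fields.

#include <atomic>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for Epsilon's _last_counter_update / _step_counter_update.
std::atomic<std::size_t> last_published{0};
constexpr std::size_t step = 1024;

// Called with the current heap usage on every allocation; at most one thread
// publishes per 'step' bytes of progress, everyone else skips cheaply.
void maybe_publish(std::size_t used) {
  std::size_t last = last_published.load(std::memory_order_relaxed);
  if (used - last >= step &&
      last_published.compare_exchange_strong(last, used, std::memory_order_relaxed)) {
    std::printf("heap used: %zu bytes\n", used);
  }
}

int main() {
  for (std::size_t used = 0; used < 10 * step; used += 100) {
    maybe_publish(used);
  }
  return 0;
}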

View File

@ -31,6 +31,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/atomic.hpp"
#include "services/memoryManager.hpp"
class EpsilonHeap : public CollectedHeap {
@ -45,8 +46,8 @@ private:
size_t _step_counter_update;
size_t _step_heap_print;
int64_t _decay_time_ns;
volatile size_t _last_counter_update;
volatile size_t _last_heap_print;
Atomic<size_t> _last_counter_update;
Atomic<size_t> _last_heap_print;
void print_tracing_info() const override;
void stop() override {};
@ -83,7 +84,7 @@ public:
bool requires_barriers(stackChunkOop obj) const override { return false; }
// Allocation
HeapWord* allocate_work(size_t size, bool verbose = true);
HeapWord* allocate_work(size_t size);
HeapWord* mem_allocate(size_t size) override;
HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,

View File

@ -99,6 +99,7 @@ EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
}
void EpsilonMonitoringSupport::update_counters() {
assert(is_ready(), "Must be ready");
MemoryService::track_memory_usage();
if (UsePerfData) {
@ -110,3 +111,11 @@ void EpsilonMonitoringSupport::update_counters() {
MetaspaceCounters::update_performance_counters();
}
}
bool EpsilonMonitoringSupport::is_ready() {
return _ready.load_acquire();
}
void EpsilonMonitoringSupport::mark_ready() {
_ready.release_store(true);
}

View File

@ -26,6 +26,7 @@
#define SHARE_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
class EpsilonGenerationCounters;
class EpsilonSpaceCounters;
@ -35,9 +36,12 @@ class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
private:
EpsilonGenerationCounters* _heap_counters;
EpsilonSpaceCounters* _space_counters;
Atomic<bool> _ready;
public:
EpsilonMonitoringSupport(EpsilonHeap* heap);
bool is_ready();
void mark_ready();
void update_counters();
};

View File

@ -36,6 +36,7 @@
#include "gc/shared/fullGCForwarding.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/flags/jvmFlagLimit.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
@ -190,7 +191,8 @@ void G1Arguments::initialize() {
}
FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
} else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
const JVMTypedFlagLimit<uint>* conc_refinement_threads_limits = JVMFlagLimit::get_range_at(FLAG_MEMBER_ENUM(G1ConcRefinementThreads))->cast<uint>();
FLAG_SET_ERGO(G1ConcRefinementThreads, MIN2(ParallelGCThreads, conc_refinement_threads_limits->max()));
}
if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1024,10 +1024,6 @@ size_t G1CardSet::num_containers() {
return cl._count;
}
G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
return _coarsen_stats;
}
void G1CardSet::print_coarsen_stats(outputStream* out) {
_last_coarsen_stats.subtract_from(_coarsen_stats);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -686,8 +686,9 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
// Only try that if we can actually perform a GC.
if (is_init_completed() &&
policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
}

View File

@ -108,7 +108,7 @@ void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
push_objarray(array, end_index);
}
array->oop_iterate_range(mark_closure(), beg_index, end_index);
array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
}
inline void G1FullGCMarker::follow_object(oop obj) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -123,9 +123,6 @@ public:
static void initialize(MemRegion reserved);
// Coarsening statistics since VM start.
static G1CardSetCoarsenStats coarsen_stats() { return G1CardSet::coarsen_stats(); }
inline uintptr_t to_card(OopOrNarrowOopStar from) const;
private:

View File

@ -238,9 +238,9 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
G1SkipCardMarkSetter x(&_scanner, dest_attr.is_new_survivor());
// Process claimed task.
to_array->oop_iterate_range(&_scanner,
checked_cast<int>(claim._start),
checked_cast<int>(claim._end));
to_array->oop_iterate_elements_range(&_scanner,
checked_cast<int>(claim._start),
checked_cast<int>(claim._end));
}
MAYBE_INLINE_EVACUATION
@ -260,7 +260,7 @@ void G1ParScanThreadState::start_partial_objarray(oop from_obj,
// Process the initial chunk. No need to process the type in the
// klass, as it will already be handled by processing the built-in
// module.
to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
to_array->oop_iterate_elements_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
}
MAYBE_INLINE_EVACUATION

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/shared/space.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
void CardTableRS::scan_old_to_young_refs(TenuredGeneration* tg, HeapWord* saved_top) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,6 @@
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/prefetch.inline.hpp"
// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.

View File

@ -412,7 +412,7 @@ void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
const int end_index = beg_index + stride;
array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
array->oop_iterate_elements_range(&mark_and_push_closure, beg_index, end_index);
if (end_index < len) {
SerialFullGC::push_objarray(array, end_index); // Push the continuation.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,6 +70,7 @@
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threads.hpp"
@ -196,8 +197,8 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
// Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
// a very rare event.
if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
static volatile uint32_t counter=0;
if (AtomicAccess::add(&counter, 1u) % 10 == 0) {
static Atomic<uint32_t> counter{0};
if (counter.add_then_fetch(1u) % 10 == 0) {
may_enter = false;
}
}
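
The diagnostic hunk above replaces a volatile counter with a function-scoped Atomic<uint32_t>, so add_then_fetch makes every tenth barrier entry, counted across all threads, force deoptimization. The same one-in-N sampling idiom expressed with std::atomic; sample_one_in is an invented helper name for this sketch.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Returns true on every n-th call, counted across all threads.
// Illustrative stand-in for the DeoptimizeNMethodBarriersALot counter.
bool sample_one_in(uint32_t n) {
  static std::atomic<uint32_t> counter{0};
  // fetch_add returns the old value, so +1 matches add_then_fetch.
  return (counter.fetch_add(1, std::memory_order_relaxed) + 1) % n == 0;
}

int main() {
  int forced = 0;
  for (int i = 0; i < 100; i++) {
    if (sample_one_in(10)) {
      forced++;
    }
  }
  std::printf("forced %d of 100\n", forced);   // prints "forced 10 of 100"
  return 0;
}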

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
@ -48,7 +48,7 @@ void ConcurrentGCThread::run() {
// Signal thread has terminated
MonitorLocker ml(Terminator_lock);
AtomicAccess::release_store(&_has_terminated, true);
_has_terminated.release_store(true);
ml.notify_all();
}
@ -57,21 +57,21 @@ void ConcurrentGCThread::stop() {
assert(!has_terminated(), "Invalid state");
// Signal thread to terminate
AtomicAccess::release_store_fence(&_should_terminate, true);
_should_terminate.release_store_fence(true);
stop_service();
// Wait for thread to terminate
MonitorLocker ml(Terminator_lock);
while (!_has_terminated) {
while (!_has_terminated.load_relaxed()) {
ml.wait();
}
}
bool ConcurrentGCThread::should_terminate() const {
return AtomicAccess::load_acquire(&_should_terminate);
return _should_terminate.load_acquire();
}
bool ConcurrentGCThread::has_terminated() const {
return AtomicAccess::load_acquire(&_has_terminated);
return _has_terminated.load_acquire();
}
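
The should_terminate/has_terminated accessors above now express their ordering through Atomic<bool> with load_acquire and release_store instead of AtomicAccess calls on volatiles. A rough standalone analogue of the stop/terminate handshake, assuming std::atomic plus a std::mutex and std::condition_variable standing in for HotSpot's MonitorLocker; all names here are illustrative.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

std::atomic<bool> should_terminate{false};
std::atomic<bool> has_terminated{false};
std::mutex terminator_lock;
std::condition_variable terminator_cv;

void worker_run() {
  while (!should_terminate.load(std::memory_order_acquire)) {
    // ... do one unit of service work ...
  }
  // Signal that the thread has terminated.
  std::lock_guard<std::mutex> lock(terminator_lock);
  has_terminated.store(true, std::memory_order_release);
  terminator_cv.notify_all();
}

void stop() {
  // Signal the worker to terminate, then wait for it to acknowledge.
  should_terminate.store(true, std::memory_order_release);
  std::unique_lock<std::mutex> lock(terminator_lock);
  terminator_cv.wait(lock, [] { return has_terminated.load(std::memory_order_acquire); });
}

int main() {
  std::thread worker(worker_run);
  stop();
  worker.join();
  return 0;
}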

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,14 +25,15 @@
#ifndef SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP
#define SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/nonJavaThread.hpp"
#include "utilities/debug.hpp"
class ConcurrentGCThread: public NamedThread {
private:
volatile bool _should_terminate;
volatile bool _has_terminated;
Atomic<bool> _should_terminate;
Atomic<bool> _has_terminated;
protected:
void create_and_start(ThreadPriority prio = NearMaxPriority);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/safepoint.hpp"
@ -60,16 +60,13 @@ public:
};
Monitor* GCLocker::_lock;
volatile bool GCLocker::_is_gc_request_pending;
Atomic<bool> GCLocker::_is_gc_request_pending{false};
DEBUG_ONLY(uint64_t GCLocker::_verify_in_cr_count;)
DEBUG_ONLY(Atomic<uint64_t> GCLocker::_verify_in_cr_count{0};)
void GCLocker::initialize() {
assert(JNICritical_lock != nullptr, "inv");
_lock = JNICritical_lock;
_is_gc_request_pending = false;
DEBUG_ONLY(_verify_in_cr_count = 0;)
}
bool GCLocker::is_active() {
@ -84,11 +81,11 @@ bool GCLocker::is_active() {
void GCLocker::block() {
// _lock is held from the beginning of block() to the end of unblock().
_lock->lock();
assert(AtomicAccess::load(&_is_gc_request_pending) == false, "precondition");
assert(_is_gc_request_pending.load_relaxed() == false, "precondition");
GCLockerTimingDebugLogger logger("Thread blocked to start GC.");
AtomicAccess::store(&_is_gc_request_pending, true);
_is_gc_request_pending.store_relaxed(true);
// The _is_gc_request_pending and _jni_active_critical (inside
// in_critical_atomic()) variables form a Dekker duality. On the GC side, the
@ -112,14 +109,14 @@ void GCLocker::block() {
#ifdef ASSERT
// Matching the storestore in GCLocker::exit.
OrderAccess::loadload();
assert(AtomicAccess::load(&_verify_in_cr_count) == 0, "inv");
assert(_verify_in_cr_count.load_relaxed() == 0, "inv");
#endif
}
void GCLocker::unblock() {
assert(AtomicAccess::load(&_is_gc_request_pending) == true, "precondition");
assert(_is_gc_request_pending.load_relaxed() == true, "precondition");
AtomicAccess::store(&_is_gc_request_pending, false);
_is_gc_request_pending.store_relaxed(false);
_lock->unlock();
}
@ -139,7 +136,7 @@ void GCLocker::enter_slow(JavaThread* current_thread) {
// Same as fast path.
OrderAccess::fence();
if (!AtomicAccess::load(&_is_gc_request_pending)) {
if (!_is_gc_request_pending.load_relaxed()) {
return;
}
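
The hunks above keep the Dekker-style pairing the comments describe: the GC publishes _is_gc_request_pending and a mutator publishes its critical-region flag, each issuing a full fence before reading the other side, so the two stores cannot both go unseen. A minimal standalone sketch of that pattern using only standard C++ <atomic>; gc_request_pending and in_critical are illustrative names, not HotSpot's fields.

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> gc_request_pending{false};
std::atomic<bool> in_critical{false};

// GC side: publish the pending request, then check whether a mutator
// is already inside a critical region.
bool gc_side_sees_critical() {
  gc_request_pending.store(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // pairs with the mutator's fence
  return in_critical.load(std::memory_order_relaxed);
}

// Mutator side: enter the critical region, then check for a pending GC request.
bool mutator_sees_pending() {
  in_critical.store(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // pairs with the GC's fence
  return gc_request_pending.load(std::memory_order_relaxed);
}

int main() {
  bool gc_saw = false;
  bool mutator_saw = false;
  std::thread gc([&] { gc_saw = gc_side_sees_critical(); });
  std::thread mu([&] { mutator_saw = mutator_sees_pending(); });
  gc.join();
  mu.join();
  // The Dekker property: at least one side observes the other's flag.
  assert(gc_saw || mutator_saw);
  return 0;
}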

Some files were not shown because too many files have changed in this diff.