Merge branch 'master' into redo-ffdhe

This commit is contained in:
Daniel Jelinski 2026-03-27 19:02:49 +01:00
commit dc7fe83f47
872 changed files with 20209 additions and 12641 deletions

View File

@ -102,6 +102,13 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
CHECKING_MSG: [if we should build headless-only (no GUI)])
AC_SUBST(ENABLE_HEADLESS_ONLY)
# Avoid headless-only on macOS and Windows, it is not supported there
if test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
AC_MSG_ERROR([headless-only is not supported on macOS and Windows])
fi
fi
# should we linktime gc unused code sections in the JDK build ?
if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
if test "x$OPENJDK_TARGET_CPU" = "xs390x" || test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then

View File

@ -338,11 +338,8 @@ else
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.
# expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
# maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
# calloc-transposed-args required for GCC 14 builds. (fixed upstream in
# Harfbuzz 032c931e1c0cfb20f18e5acb8ba005775242bd92)
HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
expansion-to-defined dangling-reference maybe-uninitialized \
calloc-transposed-args
expansion-to-defined dangling-reference maybe-uninitialized
HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers \
range-loop-analysis unused-variable
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ DISABLED_WARNINGS_java += dangling-doc-comments
COPY += .gif .png .txt .spec .script .prerm .preinst \
.postrm .postinst .list .sh .desktop .copyright .control .plist .template \
.icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl
.icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl .js
CLEAN += .properties

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,8 @@ ifeq ($(call isTargetOs, windows), true)
BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \
libExplicitAttach.c libImplicitAttach.c \
exelauncher.c libFDLeaker.c exeFDLeakTester.c \
libChangeSignalDisposition.c exePrintSignalDisposition.c
libChangeSignalDisposition.c exePrintSignalDisposition.c \
libConcNativeFork.c libPipesCloseOnExec.c
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib
@ -77,6 +78,9 @@ else
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerUnnamed := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerModule := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libConcNativeFork := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libPipesCloseOnExec := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread

View File

@ -2233,15 +2233,9 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
} else {
st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmp rscratch1, r10");
}
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
@ -2524,10 +2518,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
return RegMask::EMPTY;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -59,22 +59,6 @@ const Register SHIFT_count = r0; // where count for shift operations must be
#define __ _masm->
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
Register &tmp2) {
if (tmp1 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp1 = extra;
} else if (tmp2 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp2 = extra;
}
assert_different_registers(preserve, tmp1, tmp2);
}
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
@ -1269,12 +1253,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
assert_different_registers(obj, k_RInfo, klass_RInfo);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1287,9 +1287,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
@ -1308,9 +1306,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -105,12 +105,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
} else {
mov(t1, checked_cast<int32_t>(markWord::prototype().value()));
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
if (UseCompressedClassPointers) { // Take care not to kill klass
encode_klass_not_null(t1, klass);
strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(t1, klass); // Take care not to kill klass
strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (len->is_valid()) {
@ -121,7 +117,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// Clear gap/first 4 bytes following the length field.
strw(zr, Address(obj, base_offset));
}
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
store_klass_gap(obj, zr);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -989,26 +989,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
bool receiver_can_be_null) {
Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -285,8 +285,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
bool receiver_can_be_null = false);
void profile_virtual_call(Register receiver, Register mdp);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass);

View File

@ -762,7 +762,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
// if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@ -1002,14 +1002,10 @@ int MacroAssembler::ic_check(int end_alignment) {
load_narrow_klass_compact(tmp1, receiver);
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmp(tmp1, tmp2);
}
Label dont;
@ -3278,7 +3274,6 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
assert (Universe::heap() != nullptr, "java heap should be initialized");
if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
// rheapbase is allocated as general register
@ -5067,13 +5062,10 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
void MacroAssembler::load_klass(Register dst, Register src) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
decode_klass_not_null(dst);
} else if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
decode_klass_not_null(dst);
}
void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
@ -5125,25 +5117,21 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
assert_different_registers(obj, klass, tmp);
if (UseCompressedClassPointers) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (CompressedKlassPointers::base() == nullptr) {
cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
// Only the bottom 32 bits matter
cmpw(klass, tmp);
return;
}
decode_klass_not_null(tmp);
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (CompressedKlassPointers::base() == nullptr) {
cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
// Only the bottom 32 bits matter
cmpw(klass, tmp);
return;
}
decode_klass_not_null(tmp);
cmp(klass, tmp);
}
@ -5151,36 +5139,25 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
cmpw(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
cmp(tmp1, tmp2);
}
cmpw(tmp1, tmp2);
}
void MacroAssembler::store_klass(Register dst, Register src) {
// FIXME: Should this be a store release? concurrent gcs assumes
// klass length is valid if klass field is not null.
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
void MacroAssembler::store_klass_gap(Register dst, Register src) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
// Algorithm must match CompressedOops::encode.
@ -5326,8 +5303,6 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
}
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
assert(UseCompressedClassPointers, "not using compressed class pointers");
// KlassDecodeMode shouldn't be set already.
assert(_klass_decode_mode == KlassDecodeNone, "set once");
@ -5457,8 +5432,6 @@ void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
}
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
if (AOTCodeCache::is_on_for_dump()) {
decode_klass_not_null_for_aot(dst, src);
return;
@ -5525,7 +5498,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1112,10 +1112,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 30 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1210,7 +1210,7 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) {
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver) {
assert_different_registers(mdp, receiver, Rtemp);
if (ProfileInterpreter) {
@ -1219,19 +1219,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register rece
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
cbnz(receiver, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, Rtemp, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -239,8 +239,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp); // Sets mdp, blows Rtemp.
void profile_final_call(Register mdp); // Sets mdp, blows Rtemp.
void profile_virtual_call(Register mdp, Register receiver, // Sets mdp, blows Rtemp.
bool receiver_can_be_null = false);
void profile_virtual_call(Register mdp, Register receiver); // Sets mdp, blows Rtemp.
void profile_ret(Register mdp, Register return_bci); // Sets mdp, blows R0-R3/R0-R18, Rtemp, LR
void profile_null_seen(Register mdp); // Sets mdp.
void profile_typecheck(Register mdp, Register klass); // Sets mdp, blows Rtemp.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@
static bool narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}

View File

@ -1580,10 +1580,6 @@ class Assembler : public AbstractAssembler {
static bool is_nop(int x) {
return x == 0x60000000;
}
// endgroup opcode for Power6
static bool is_endgroup(int x) {
return is_ori(x) && inv_ra_field(x) == 1 && inv_rs_field(x) == 1 && inv_d1_field(x) == 0;
}
private:
@ -1659,9 +1655,6 @@ class Assembler : public AbstractAssembler {
inline void ori_opt( Register d, int ui16);
inline void oris_opt(Register d, int ui16);
// endgroup opcode for Power6
inline void endgroup();
// count instructions
inline void cntlzw( Register a, Register s);
inline void cntlzw_( Register a, Register s);

View File

@ -253,8 +253,6 @@ inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s,
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
inline void Assembler::oris_opt(Register d, int ui16) { if (ui16!=0) Assembler::oris(d, d, ui16); }
inline void Assembler::endgroup() { Assembler::ori(R1, R1, 0); }
// count instructions
inline void Assembler::cntlzw( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzw_( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -144,7 +144,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (len->is_valid()) {
stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
// Otherwise length is in the class gap.
store_klass_gap(obj);
}

View File

@ -119,9 +119,6 @@ address Disassembler::decode_instruction0(address here, outputStream * st, addre
} else if (instruction == 0xbadbabe) {
st->print(".data 0xbadbabe");
next = here + Assembler::instr_len(here);
} else if (Assembler::is_endgroup(instruction)) {
st->print("endgroup");
next = here + Assembler::instr_len(here);
} else {
next = here;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -258,7 +258,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register scratch1, Register scratch2);
void profile_call(Register scratch1, Register scratch2);
void profile_final_call(Register scratch1, Register scratch2);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2);
void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
void profile_switch_default(Register scratch1, Register scratch2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1340,28 +1340,15 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register s
// Count a virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
Register Rscratch1,
Register Rscratch2,
bool receiver_can_be_null) {
Register Rscratch2) {
if (!ProfileInterpreter) { return; }
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
cmpdi(CR0, Rreceiver, 0);
bne(CR0, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -483,7 +483,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
// variant 3, far cond branch to the next instruction, already patched to nops:
//
// nop
// endgroup
// nop
// SKIP/DEST:
//
return;
@ -500,7 +500,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
// Far branch to next instruction: Optimize it by patching nops (produce variant 3).
masm.nop();
masm.endgroup();
masm.nop();
} else {
if (is_bc_far_variant1_at(instruction_addr)) {
// variant 1, the 1st instruction contains the destination address:
@ -3201,23 +3201,17 @@ Register MacroAssembler::encode_klass_not_null(Register dst, Register src) {
void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
Register compressedKlass = encode_klass_not_null(ck, klass);
stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
} else {
std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
}
Register compressedKlass = encode_klass_not_null(ck, klass);
stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
}
void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop);
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop);
}
int MacroAssembler::instr_size_for_decode_klass_not_null() {
@ -3226,17 +3220,13 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
// Not yet computed?
if (computed_size == -1) {
if (!UseCompressedClassPointers) {
computed_size = 0;
} else {
// Determine by scratch emit.
ResourceMark rm;
int code_size = 8 * BytesPerInstWord;
CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0);
MacroAssembler* a = new MacroAssembler(&cb);
a->decode_klass_not_null(R11_scratch1);
computed_size = a->offset();
}
// Determine by scratch emit.
ResourceMark rm;
int code_size = 8 * BytesPerInstWord;
CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0);
MacroAssembler* a = new MacroAssembler(&cb);
a->decode_klass_not_null(R11_scratch1);
computed_size = a->offset();
}
return computed_size;
@ -3259,18 +3249,14 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
void MacroAssembler::load_klass_no_decode(Register dst, Register src) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
} else if (UseCompressedClassPointers) {
lwz(dst, oopDesc::klass_offset_in_bytes(), src);
} else {
ld(dst, oopDesc::klass_offset_in_bytes(), src);
lwz(dst, oopDesc::klass_offset_in_bytes(), src);
}
}
void MacroAssembler::load_klass(Register dst, Register src) {
load_klass_no_decode(dst, src);
if (UseCompressedClassPointers) { // also true for UseCompactObjectHeaders
decode_klass_not_null(dst);
}
decode_klass_not_null(dst);
}
// Loads the obj's Klass* into dst.
@ -3286,18 +3272,13 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
void MacroAssembler::cmp_klass(ConditionRegister dst, Register obj, Register klass, Register tmp, Register tmp2) {
assert_different_registers(obj, klass, tmp);
if (UseCompressedClassPointers) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
lwz(tmp, oopDesc::klass_offset_in_bytes(), obj);
}
Register encoded_klass = encode_klass_not_null(tmp2, klass);
cmpw(dst, tmp, encoded_klass);
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ld(tmp, oopDesc::klass_offset_in_bytes(), obj);
cmpd(dst, tmp, klass);
lwz(tmp, oopDesc::klass_offset_in_bytes(), obj);
}
Register encoded_klass = encode_klass_not_null(tmp2, klass);
cmpw(dst, tmp, encoded_klass);
}
void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register obj1, Register obj2, Register tmp1, Register tmp2) {
@ -3305,14 +3286,10 @@ void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register ob
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
cmpw(dst, tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
lwz(tmp1, oopDesc::klass_offset_in_bytes(), obj1);
lwz(tmp2, oopDesc::klass_offset_in_bytes(), obj2);
cmpw(dst, tmp1, tmp2);
} else {
ld(tmp1, oopDesc::klass_offset_in_bytes(), obj1);
ld(tmp2, oopDesc::klass_offset_in_bytes(), obj2);
cmpd(dst, tmp1, tmp2);
}
}

View File

@ -70,14 +70,6 @@ class MacroAssembler: public Assembler {
// Move register if destination register and target register are different
inline void mr_if_needed(Register rd, Register rs, bool allow_invalid = false);
inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
// This is dedicated for emitting scheduled mach nodes. For better
// readability of the ad file I put it here.
// Endgroups are not needed if
// - the scheduler is off
// - the scheduler found that there is a natural group end, in that
// case it reduced the size of the instruction used in the test
// yielding 'needed'.
inline void endgroup_if_needed(bool needed);
// Memory barriers.
inline void membar(int bits);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,11 +72,6 @@ inline void MacroAssembler::mr_if_needed(Register rd, Register rs, bool allow_no
inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
if (rs != rd) fmr(rd, rs);
}
inline void MacroAssembler::endgroup_if_needed(bool needed) {
if (needed) {
endgroup();
}
}
inline void MacroAssembler::membar(int bits) {
// Comment: Usage of elemental_membar(bits) is not recommended for Power 8.
@ -240,13 +235,13 @@ inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
// Variant 3, far cond branch to the next instruction, already patched to nops:
//
// nop
// endgroup
// nop
// SKIP/DEST:
//
const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4);
return is_nop(instruction_1) &&
is_endgroup(instruction_2);
is_nop(instruction_2);
}
// set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,

View File

@ -87,7 +87,6 @@
static bool narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
// TODO: PPC port if (MatchDecodeNodes) return true;
return false;
}

View File

@ -2457,10 +2457,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 28 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
// Register for DIVI projection of divmodI.
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
@ -6327,36 +6323,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@ -6369,7 +6337,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
@ -14391,20 +14358,6 @@ instruct tlsLoadP(threadRegP dst) %{
//---Some PPC specific nodes---------------------------------------------------
// Stop a group.
instruct endGroup() %{
ins_cost(0);
ins_is_nop(true);
format %{ "End Bundle (ori r1, r1, 0)" %}
size(4);
ins_encode %{
__ endgroup();
%}
ins_pipe(pipe_class_default);
%}
// Nop instructions
instruct fxNop() %{

View File

@ -3489,7 +3489,7 @@ void TemplateTable::invokevirtual(int byte_no) {
// Get receiver klass.
__ load_klass_check_null_throw(Rrecv_klass, Rrecv, R11_scratch1);
__ verify_klass_ptr(Rrecv_klass);
__ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
__ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2);
generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}
@ -3596,7 +3596,7 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
// Non-final callc case.
__ bind(LnotFinal);
__ lhz(Rindex, in_bytes(ResolvedMethodEntry::table_index_offset()), Rcache);
__ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
__ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch);
generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}
@ -3664,7 +3664,7 @@ void TemplateTable::invokeinterface(int byte_no) {
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
L_no_such_interface, /*return_method=*/false);
__ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
__ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2);
// Find entry point to call.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -196,12 +196,9 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
if (UseCompactObjectHeaders) {
__ load_narrow_klass_compact(tmp, src);
__ load_narrow_klass_compact(t0, dst);
} else if (UseCompressedClassPointers) {
} else {
__ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
__ ld(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
}
__ bne(tmp, t0, *stub->entry(), /* is_far */ true);
} else {
@ -257,9 +254,7 @@ void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, c
// but not necessarily exactly of type default_type.
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
}
__ encode_klass_not_null(tmp);
if (basic_type != T_OBJECT) {
__ cmp_klass_compressed(dst, tmp, t0, halt, false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -55,20 +55,6 @@ const Register SHIFT_count = x10; // where count for shift operations must be
#define __ _masm->
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
Register &tmp2) {
if (tmp1 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp1 = extra;
} else if (tmp2 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp2 = extra;
}
assert_different_registers(preserve, tmp1, tmp2);
}
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
@ -1155,12 +1141,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
assert_different_registers(obj, k_RInfo, klass_RInfo);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -1073,9 +1073,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
@ -1094,9 +1092,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -92,12 +92,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// This assumes that all prototype bits fitr in an int32_t
mv(tmp1, checked_cast<int32_t>(markWord::prototype().value()));
sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));
if (UseCompressedClassPointers) { // Take care not to kill klass
encode_klass_not_null(tmp1, klass, tmp2);
sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(tmp1, klass, tmp2);
sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (len->is_valid()) {
@ -108,7 +104,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// Clear gap/first 4 bytes following the length field.
sw(zr, Address(obj, base_offset));
}
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
store_klass_gap(obj, zr);
}
}

View File

@ -1175,8 +1175,7 @@ void C2_MacroAssembler::string_compare_long_same_encoding(Register result, Regis
Label TAIL_CHECK, TAIL, NEXT_WORD, DIFFERENCE;
const int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
const int minCharsInWord = isLL ? wordSize : wordSize / 2;
@ -1269,8 +1268,7 @@ void C2_MacroAssembler::string_compare_long_different_encoding(Register result,
Label TAIL, NEXT_WORD, DIFFERENCE;
const int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
Register strL = isLU ? str1 : str2;
Register strU = isLU ? str2 : str1;
@ -1485,8 +1483,7 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
int length_offset = arrayOopDesc::length_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
Register cnt1 = tmp3;
Register cnt2 = tmp1; // cnt2 only used in array length compare
@ -1611,8 +1608,7 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
BLOCK_COMMENT("string_equals {");
@ -2699,8 +2695,7 @@ void C2_MacroAssembler::arrays_equals_v(Register a1, Register a2, Register resul
int length_offset = arrayOopDesc::length_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
BLOCK_COMMENT("arrays_equals_v {");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -1040,26 +1040,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
bool receiver_can_be_null) {
Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
j(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -274,8 +274,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
bool receiver_can_be_null = false);
void profile_virtual_call(Register receiver, Register mdp);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -3514,10 +3514,8 @@ void MacroAssembler::orptr(Address adr, RegisterOrConstant src, Register tmp1, R
void MacroAssembler::cmp_klass_compressed(Register oop, Register trial_klass, Register tmp, Label &L, bool equal) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, oop);
} else if (UseCompressedClassPointers) {
lwu(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
} else {
ld(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
lwu(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
}
if (equal) {
beq(trial_klass, tmp, L);
@ -3741,11 +3739,9 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
decode_klass_not_null(dst, tmp);
} else if (UseCompressedClassPointers) {
} else {
lwu(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst, tmp);
} else {
ld(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
}
@ -3753,20 +3749,15 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
// FIXME: Should this be a store release? concurrent gcs assumes
// klass length is valid if klass field is not null.
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
encode_klass_not_null(src, tmp);
sw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
sd(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(src, tmp);
sw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
void MacroAssembler::store_klass_gap(Register dst, Register src) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
// Store to klass gap in destination
sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
@ -3775,7 +3766,6 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
}
void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers");
assert_different_registers(dst, tmp);
assert_different_registers(src, tmp);
@ -3806,8 +3796,6 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
}
void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register tmp) {
assert(UseCompressedClassPointers, "should only be used for compressed headers");
if (CompressedKlassPointers::base() == nullptr) {
if (CompressedKlassPointers::shift() != 0) {
srli(dst, src, CompressedKlassPointers::shift());
@ -5337,7 +5325,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
@ -5417,12 +5404,9 @@ int MacroAssembler::ic_check(int end_alignment) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp1, receiver);
lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
} else if (UseCompressedClassPointers) {
} else {
lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
} else {
ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ld(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
}
Label ic_hit;

View File

@ -1801,13 +1801,8 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
assert_cond(st != nullptr);
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
} else {
st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tld t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
}
st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tbeq t1, t2, ic_hit");
st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
st->print_cr("\tic_hit:");
@ -2111,10 +2106,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
return RegMask::EMPTY;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2025, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -3070,8 +3070,7 @@ class StubGenerator: public StubCodeGenerator {
const Register tmp = x30, tmpLval = x12;
int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
#ifdef ASSERT
if (AvoidUnalignedAccesses) {
@ -3128,8 +3127,7 @@ class StubGenerator: public StubCodeGenerator {
tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x12;
int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
assert((base_offset % (UseCompactObjectHeaders ? 4 : 8)) == 0, "Must be");
Register strU = isLU ? str2 : str1,
strL = isLU ? str1 : str2,

View File

@ -55,7 +55,7 @@ class VM_Version : public Abstract_VM_Version {
public:
RVFeatureValue(const char* pretty, int linux_bit_num, bool fstring) :
_pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)) {
_pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit<uint64_t>(linux_bit_num)) {
}
virtual void enable_feature(int64_t value = 0) = 0;
virtual void disable_feature() = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2251,9 +2251,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// but not necessarily exactly of type default_type.
NearLabel known_ok, halt;
metadata2reg(default_type->constant_encoding(), tmp);
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
}
__ encode_klass_not_null(tmp);
if (basic_type != T_OBJECT) {
__ cmp_klass(tmp, dst, Z_R1_scratch);
@ -2540,13 +2538,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
// Get object class.
// Not a safepoint as obj null check happens earlier.
if (op->fast_check()) {
if (UseCompressedClassPointers) {
__ load_klass(klass_RInfo, obj);
__ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
} else {
__ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
__ branch_optimized(Assembler::bcondNotEqual, *failure_target);
}
__ load_klass(klass_RInfo, obj);
__ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
// Successful cast, fall through to profile or jump.
} else {
bool need_slow_path = !k->is_loaded() ||

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -107,10 +107,10 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
}
if (len->is_valid()) {
// Length will be in the klass gap, if one exists.
// Length will be in the klass gap.
z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops.
} else if (!UseCompactObjectHeaders) {
store_klass_gap(Rzero, obj); // Zero klass gap.
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1259,27 +1259,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
Register reg2) {
if (ProfileInterpreter) {
NearLabel profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
NearLabel skip_receiver_profile;
if (receiver_can_be_null) {
NearLabel not_null;
compareU64_and_branch(receiver, (intptr_t)0L, bcondNotEqual, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
z_bru(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -296,8 +296,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
Register scratch2);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);

View File

@ -1237,7 +1237,6 @@ void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
assert(UseCompressedClassPointers, "must be on to call this method");
narrowKlass encoded_k = CompressedKlassPointers::encode(k);
load_const_32to64(t, encoded_k, false /*sign_extend*/);
}
@ -1255,7 +1254,6 @@ void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2)
// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
assert(UseCompressedClassPointers, "must be on to call this method");
narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
Assembler::z_clfi(klass1, encoded_k);
@ -1348,8 +1346,6 @@ int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
narrowKlass nk = CompressedKlassPointers::encode(k);
return patch_load_const_32to64(pos, nk);
}
@ -1364,8 +1360,6 @@ int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
narrowKlass nk = CompressedKlassPointers::encode(k);
return patch_compare_immediate_32(pos, nk);
}
@ -2235,10 +2229,8 @@ int MacroAssembler::ic_check(int end_alignment) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(R1_scratch, R2_receiver);
} else if (UseCompressedClassPointers) {
z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
} else {
z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
}
z_cg(R1_scratch, Address(R9_data, in_bytes(CompiledICData::speculated_klass_offset())));
z_bre(success);
@ -3916,7 +3908,6 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
address base = CompressedKlassPointers::base();
int shift = CompressedKlassPointers::shift();
bool need_zero_extend = base != nullptr;
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
BLOCK_COMMENT("cKlass encoder {");
@ -4013,7 +4004,6 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
address base = CompressedKlassPointers::base();
int shift_size = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
int addbase_size = 0;
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
if (base != nullptr) {
unsigned int base_h = ((unsigned long)base)>>32;
@ -4043,7 +4033,6 @@ void MacroAssembler::decode_klass_not_null(Register dst) {
address base = CompressedKlassPointers::base();
int shift = CompressedKlassPointers::shift();
int beg_off = offset();
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
BLOCK_COMMENT("cKlass decoder (const size) {");
@ -4085,7 +4074,6 @@ void MacroAssembler::decode_klass_not_null(Register dst) {
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
address base = CompressedKlassPointers::base();
int shift = CompressedKlassPointers::shift();
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
BLOCK_COMMENT("cKlass decoder {");
@ -4125,13 +4113,9 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
}
void MacroAssembler::load_klass(Register klass, Address mem) {
if (UseCompressedClassPointers) {
z_llgf(klass, mem);
// Attention: no null check here!
decode_klass_not_null(klass);
} else {
z_lg(klass, mem);
}
z_llgf(klass, mem);
// Attention: no null check here!
decode_klass_not_null(klass);
}
// Loads the obj's Klass* into dst.
@ -4154,10 +4138,8 @@ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
assert_different_registers(klass, obj, tmp);
load_narrow_klass_compact(tmp, obj);
z_cr(klass, tmp);
} else if (UseCompressedClassPointers) {
z_c(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
z_cg(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
z_c(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
BLOCK_COMMENT("} cmp_klass");
}
@ -4170,12 +4152,9 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
z_cr(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
z_l(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
z_c(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
} else {
z_lg(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
z_cg(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
}
BLOCK_COMMENT("} cmp_klasses_from_objects");
}
@ -4184,36 +4163,28 @@ void MacroAssembler::load_klass(Register klass, Register src_oop) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(klass, src_oop);
decode_klass_not_null(klass);
} else if (UseCompressedClassPointers) {
} else {
z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
decode_klass_not_null(klass);
} else {
z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
}
}
void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
assert(!UseCompactObjectHeaders, "Don't use with compact headers");
if (UseCompressedClassPointers) {
assert_different_registers(dst_oop, klass, Z_R0);
if (ck == noreg) ck = klass;
encode_klass_not_null(ck, klass);
z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
} else {
z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
}
assert_different_registers(dst_oop, klass, Z_R0);
if (ck == noreg) ck = klass;
encode_klass_not_null(ck, klass);
z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
}
void MacroAssembler::store_klass_gap(Register s, Register d) {
assert(!UseCompactObjectHeaders, "Don't use with compact headers");
if (UseCompressedClassPointers) {
assert(s != d, "not enough registers");
// Support s = noreg.
if (s != noreg) {
z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
} else {
z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
}
assert(s != d, "not enough registers");
// Support s = noreg.
if (s != noreg) {
z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
} else {
z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
}
}
@ -4227,67 +4198,64 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba
BLOCK_COMMENT("compare klass ptr {");
if (UseCompressedClassPointers) {
const int shift = CompressedKlassPointers::shift();
address base = CompressedKlassPointers::base();
const int shift = CompressedKlassPointers::shift();
address base = CompressedKlassPointers::base();
if (UseCompactObjectHeaders) {
assert(shift >= 3, "cKlass encoder detected bad shift");
} else {
assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift");
}
assert_different_registers(Rop1, Z_R0);
assert_different_registers(Rop1, Rbase, Z_R1);
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
if (base == nullptr) {
if (shift == 0) {
z_cl(Rop1, disp, Rbase); // Unscaled
} else {
z_srlg(Z_R0, Rop1, shift); // ZeroBased
z_cl(Z_R0, disp, Rbase);
}
} else { // HeapBased
#ifdef ASSERT
bool used_R0 = true;
bool used_R1 = true;
#endif
Register current = Rop1;
Label done;
if (maybenull) { // null pointer must be preserved!
z_ltgr(Z_R0, current);
z_bre(done);
current = Z_R0;
}
unsigned int base_h = ((unsigned long)base)>>32;
unsigned int base_l = (unsigned int)((unsigned long)base);
if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
lgr_if_needed(Z_R0, current);
z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half.
} else if ((base_h == 0) && (base_l != 0)) {
lgr_if_needed(Z_R0, current);
z_agfi(Z_R0, -(int)base_l);
} else {
int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
}
if (shift != 0) {
z_srlg(Z_R0, Z_R0, shift);
}
bind(done);
z_cl(Z_R0, disp, Rbase);
#ifdef ASSERT
if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
#endif
}
if (UseCompactObjectHeaders) {
assert(shift >= 3, "cKlass encoder detected bad shift");
} else {
z_clg(Rop1, disp, Z_R0, Rbase);
assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift");
}
assert_different_registers(Rop1, Z_R0);
assert_different_registers(Rop1, Rbase, Z_R1);
// First encode register oop and then compare with cOop in memory.
// This sequence saves an unnecessary cOop load and decode.
if (base == nullptr) {
if (shift == 0) {
z_cl(Rop1, disp, Rbase); // Unscaled
} else {
z_srlg(Z_R0, Rop1, shift); // ZeroBased
z_cl(Z_R0, disp, Rbase);
}
} else { // HeapBased
#ifdef ASSERT
bool used_R0 = true;
bool used_R1 = true;
#endif
Register current = Rop1;
Label done;
if (maybenull) { // null pointer must be preserved!
z_ltgr(Z_R0, current);
z_bre(done);
current = Z_R0;
}
unsigned int base_h = ((unsigned long)base)>>32;
unsigned int base_l = (unsigned int)((unsigned long)base);
if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
lgr_if_needed(Z_R0, current);
z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half.
} else if ((base_h == 0) && (base_l != 0)) {
lgr_if_needed(Z_R0, current);
z_agfi(Z_R0, -(int)base_l);
} else {
int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
}
if (shift != 0) {
z_srlg(Z_R0, Z_R0, shift);
}
bind(done);
z_cl(Z_R0, disp, Rbase);
#ifdef ASSERT
if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
#endif
}
BLOCK_COMMENT("} compare klass ptr");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* Copyright (c) 2024 IBM Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -842,8 +842,7 @@ class MacroAssembler: public Assembler {
void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
void store_klass_gap(Register s, Register dst_oop);
void load_narrow_klass_compact(Register dst, Register src);
// Compares the Klass pointer of an object to a given Klass (which might be narrow,
// depending on UseCompressedClassPointers).
// Compares the narrow Klass pointer of an object to a given narrow Klass
void cmp_klass(Register klass, Register obj, Register tmp);
// Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
// Uses tmp1 and tmp2 as temporary registers.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -82,7 +82,6 @@
static bool narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
// TODO HS25: z port if (MatchDecodeNodes) return true;
return false;
}

View File

@ -1929,10 +1929,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 15 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
return _Z_RARG4_INT_REG_mask;

View File

@ -3472,7 +3472,7 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
emit_int16(0x6F, (0xC0 | encode));
}
void Assembler::vmovw(XMMRegister dst, Register src) {
void Assembler::evmovw(XMMRegister dst, Register src) {
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
@ -3480,7 +3480,7 @@ void Assembler::vmovw(XMMRegister dst, Register src) {
emit_int16(0x6E, (0xC0 | encode));
}
void Assembler::vmovw(Register dst, XMMRegister src) {
void Assembler::evmovw(Register dst, XMMRegister src) {
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
@ -3488,6 +3488,36 @@ void Assembler::vmovw(Register dst, XMMRegister src) {
emit_int16(0x7E, (0xC0 | encode));
}
void Assembler::evmovw(XMMRegister dst, Address src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
attributes.set_is_evex_instruction();
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int8(0x6E);
emit_operand(dst, src, 0);
}
void Assembler::evmovw(Address dst, XMMRegister src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
attributes.set_is_evex_instruction();
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int8(0x7E);
emit_operand(src, dst, 0);
}
void Assembler::evmovw(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int16(0x6E, (0xC0 | encode));
}
void Assembler::vmovdqu(XMMRegister dst, Address src) {
assert(UseAVX > 0, "");
InstructionMark im(this);
@ -7310,6 +7340,42 @@ void Assembler::etzcntq(Register dst, Address src, bool no_flags) {
emit_operand(dst, src, 0);
}
void Assembler::evucomish(XMMRegister dst, Address src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
attributes.set_is_evex_instruction();
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
emit_int8(0x2E);
emit_operand(dst, src, 0);
}
void Assembler::evucomish(XMMRegister dst, XMMRegister src) {
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_MAP5, &attributes);
emit_int16(0x2E, (0xC0 | encode));
}
void Assembler::evucomxsh(XMMRegister dst, Address src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
attributes.set_is_evex_instruction();
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int8(0x2E);
emit_operand(dst, src, 0);
}
void Assembler::evucomxsh(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int16(0x2E, (0xC0 | encode));
}
void Assembler::ucomisd(XMMRegister dst, Address src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@ -7327,7 +7393,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
emit_int16(0x2E, (0xC0 | encode));
}
void Assembler::vucomxsd(XMMRegister dst, Address src) {
void Assembler::evucomxsd(XMMRegister dst, Address src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@ -7338,7 +7404,7 @@ void Assembler::vucomxsd(XMMRegister dst, Address src) {
emit_operand(dst, src, 0);
}
void Assembler::vucomxsd(XMMRegister dst, XMMRegister src) {
void Assembler::evucomxsd(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
@ -7361,7 +7427,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
emit_int16(0x2E, (0xC0 | encode));
}
void Assembler::vucomxss(XMMRegister dst, Address src) {
void Assembler::evucomxss(XMMRegister dst, Address src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@ -7372,7 +7438,7 @@ void Assembler::vucomxss(XMMRegister dst, Address src) {
emit_operand(dst, src, 0);
}
void Assembler::vucomxss(XMMRegister dst, XMMRegister src) {
void Assembler::evucomxss(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
@ -8411,30 +8477,6 @@ void Assembler::vmulsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
emit_int16(0x59, (0xC0 | encode));
}
void Assembler::vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int16(0x5F, (0xC0 | encode));
}
void Assembler::eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x53, (0xC0 | encode), imm8);
}
void Assembler::vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_MAP5, &attributes);
emit_int16(0x5D, (0xC0 | encode));
}
void Assembler::vsqrtsh(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@ -13369,48 +13411,38 @@ bool Assembler::is_demotable(bool no_flags, int dst_enc, int nds_enc) {
return (!no_flags && dst_enc == nds_enc);
}
void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int16(0x5F, (0xC0 | encode));
}
void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int16(0x5F, (0xC0 | encode));
}
void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int16(0x5D, (0xC0 | encode));
}
void Assembler::eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
void Assembler::evminmaxsh(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_is_evex_instruction();
attributes.set_embedded_opmask_register_specifier(mask);
if (merge) {
attributes.reset_is_clear_context();
}
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x53, (0xC0 | encode), imm8);
}
void Assembler::evminmaxss(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_is_evex_instruction();
attributes.set_embedded_opmask_register_specifier(mask);
if (merge) {
attributes.reset_is_clear_context();
}
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x53, (0xC0 | encode), imm8);
}
void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int16(0x5D, (0xC0 | encode));
}
void Assembler::eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
void Assembler::evminmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8) {
assert(VM_Version::supports_avx10_2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_is_evex_instruction();
attributes.set_embedded_opmask_register_specifier(mask);
if (merge) {
attributes.reset_is_clear_context();
}
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x53, (0xC0 | encode), imm8);
}

View File

@ -1694,8 +1694,11 @@ private:
void movsbl(Register dst, Address src);
void movsbl(Register dst, Register src);
void vmovw(XMMRegister dst, Register src);
void vmovw(Register dst, XMMRegister src);
void evmovw(XMMRegister dst, Register src);
void evmovw(Register dst, XMMRegister src);
void evmovw(XMMRegister dst, Address src);
void evmovw(Address dst, XMMRegister src);
void evmovw(XMMRegister dst, XMMRegister src);
void movsbq(Register dst, Address src);
void movsbq(Register dst, Register src);
@ -2329,17 +2332,23 @@ private:
void tzcntq(Register dst, Address src);
void etzcntq(Register dst, Address src, bool no_flags);
// Unordered Compare Scalar Half-Precision Floating-Point Values and set EFLAGS
void evucomish(XMMRegister dst, Address src);
void evucomish(XMMRegister dst, XMMRegister src);
void evucomxsh(XMMRegister dst, Address src);
void evucomxsh(XMMRegister dst, XMMRegister src);
// Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void ucomisd(XMMRegister dst, Address src);
void ucomisd(XMMRegister dst, XMMRegister src);
void vucomxsd(XMMRegister dst, Address src);
void vucomxsd(XMMRegister dst, XMMRegister src);
void evucomxsd(XMMRegister dst, Address src);
void evucomxsd(XMMRegister dst, XMMRegister src);
// Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
void ucomiss(XMMRegister dst, Address src);
void ucomiss(XMMRegister dst, XMMRegister src);
void vucomxss(XMMRegister dst, Address src);
void vucomxss(XMMRegister dst, XMMRegister src);
void evucomxss(XMMRegister dst, Address src);
void evucomxss(XMMRegister dst, XMMRegister src);
void xabort(int8_t imm8);
@ -2417,11 +2426,6 @@ private:
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void sarxl(Register dst, Register src1, Register src2);
void sarxl(Register dst, Address src1, Register src2);
void sarxq(Register dst, Register src1, Register src2);
@ -2552,8 +2556,6 @@ private:
void vsubsh(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmulsh(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vdivsh(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsqrtsh(XMMRegister dst, XMMRegister src);
void vfmadd132sh(XMMRegister dst, XMMRegister src1, XMMRegister src2);
@ -2790,9 +2792,9 @@ private:
void vminpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len);
// AVX10.2 floating point minmax instructions
void eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void evminmaxsh(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8);
void evminmaxss(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8);
void evminmaxsd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8);
void evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len);
void evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len);
void evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,23 +77,6 @@ const Register SHIFT_count = rcx; // where count for shift operations must be
#define __ _masm->
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
Register &tmp2) {
if (tmp1 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp1 = extra;
} else if (tmp2 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp2 = extra;
}
assert_different_registers(preserve, tmp1, tmp2);
}
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
@ -1309,12 +1292,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
assert_different_registers(obj, k_RInfo, klass_RInfo);
@ -1348,12 +1327,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
if (op->fast_check()) {
// get object class
// not a safepoint as obj null check happens earlier
if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj, tmp_load_klass);
__ cmpptr(k_RInfo, Rtmp1);
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ load_klass(Rtmp1, obj, tmp_load_klass);
__ cmpptr(k_RInfo, Rtmp1);
__ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump
} else {
@ -2651,9 +2626,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// but not necessarily exactly of type default_type.
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp, rscratch1);
}
__ encode_klass_not_null(tmp, rscratch1);
if (basic_type != T_OBJECT) {
__ cmp_klass(tmp, dst, tmp2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1291,9 +1291,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
@ -1313,9 +1311,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,14 +85,11 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (UseCompactObjectHeaders) {
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else if (UseCompressedClassPointers) { // Take care not to kill klass
} else { // Take care not to kill klass
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(t1, klass);
encode_klass_not_null(t1, rscratch1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else {
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
}
if (len->is_valid()) {
@ -104,7 +101,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
xorl(t1, t1);
movl(Address(obj, base_offset), t1);
}
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}

View File

@ -152,7 +152,7 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle
// Because the transitions from emitted code to the runtime
// monitorenter/exit helper stubs are so slow it's critical that
// we inline both the stack-locking fast path and the inflated fast path.
// we inline both the lock-stack fast path and the inflated fast path.
//
// See also: cmpFastLock and cmpFastUnlock.
//
@ -1037,8 +1037,8 @@ void C2_MacroAssembler::evminmax_fp(int opcode, BasicType elem_bt,
}
}
void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2, int vlen_enc) {
void C2_MacroAssembler::vminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2, int vlen_enc) {
assert(opc == Op_MinV || opc == Op_MinReductionV ||
opc == Op_MaxV || opc == Op_MaxReductionV, "sanity");
@ -1052,6 +1052,21 @@ void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst,
}
}
void C2_MacroAssembler::sminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2) {
assert(opc == Op_MinF || opc == Op_MaxF ||
opc == Op_MinD || opc == Op_MaxD, "sanity");
int imm8 = (opc == Op_MinF || opc == Op_MinD) ? AVX10_2_MINMAX_MIN_COMPARE_SIGN
: AVX10_2_MINMAX_MAX_COMPARE_SIGN;
if (elem_bt == T_FLOAT) {
evminmaxss(dst, mask, src1, src2, true, imm8);
} else {
assert(elem_bt == T_DOUBLE, "");
evminmaxsd(dst, mask, src1, src2, true, imm8);
}
}
// Float/Double signum
void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero, XMMRegister one) {
assert(opcode == Op_SignumF || opcode == Op_SignumD, "sanity");
@ -1063,7 +1078,7 @@ void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero,
// If other floating point comparison instructions used, ZF=1 for equal and unordered cases
if (opcode == Op_SignumF) {
if (VM_Version::supports_avx10_2()) {
vucomxss(dst, zero);
evucomxss(dst, zero);
jcc(Assembler::negative, DONE_LABEL);
} else {
ucomiss(dst, zero);
@ -1074,7 +1089,7 @@ void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero,
xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), noreg);
} else if (opcode == Op_SignumD) {
if (VM_Version::supports_avx10_2()) {
vucomxsd(dst, zero);
evucomxsd(dst, zero);
jcc(Assembler::negative, DONE_LABEL);
} else {
ucomisd(dst, zero);
@ -2400,7 +2415,7 @@ void C2_MacroAssembler::reduceFloatMinMax(int opcode, int vlen, bool is_dst_vali
}
if (VM_Version::supports_avx10_2()) {
vminmax_fp(opcode, T_FLOAT, wdst, k0, wtmp, wsrc, vlen_enc);
vminmax_fp_avx10_2(opcode, T_FLOAT, wdst, k0, wtmp, wsrc, vlen_enc);
} else {
vminmax_fp(opcode, T_FLOAT, wdst, wtmp, wsrc, tmp, atmp, btmp, vlen_enc);
}
@ -2409,7 +2424,7 @@ void C2_MacroAssembler::reduceFloatMinMax(int opcode, int vlen, bool is_dst_vali
}
if (is_dst_valid) {
if (VM_Version::supports_avx10_2()) {
vminmax_fp(opcode, T_FLOAT, dst, k0, wdst, dst, Assembler::AVX_128bit);
vminmax_fp_avx10_2(opcode, T_FLOAT, dst, k0, wdst, dst, Assembler::AVX_128bit);
} else {
vminmax_fp(opcode, T_FLOAT, dst, wdst, dst, tmp, atmp, btmp, Assembler::AVX_128bit);
}
@ -2440,7 +2455,7 @@ void C2_MacroAssembler::reduceDoubleMinMax(int opcode, int vlen, bool is_dst_val
}
if (VM_Version::supports_avx10_2()) {
vminmax_fp(opcode, T_DOUBLE, wdst, k0, wtmp, wsrc, vlen_enc);
vminmax_fp_avx10_2(opcode, T_DOUBLE, wdst, k0, wtmp, wsrc, vlen_enc);
} else {
vminmax_fp(opcode, T_DOUBLE, wdst, wtmp, wsrc, tmp, atmp, btmp, vlen_enc);
}
@ -2451,7 +2466,7 @@ void C2_MacroAssembler::reduceDoubleMinMax(int opcode, int vlen, bool is_dst_val
if (is_dst_valid) {
if (VM_Version::supports_avx10_2()) {
vminmax_fp(opcode, T_DOUBLE, dst, k0, wdst, dst, Assembler::AVX_128bit);
vminmax_fp_avx10_2(opcode, T_DOUBLE, dst, k0, wdst, dst, Assembler::AVX_128bit);
} else {
vminmax_fp(opcode, T_DOUBLE, dst, wdst, dst, tmp, atmp, btmp, Assembler::AVX_128bit);
}
@ -7061,13 +7076,25 @@ void C2_MacroAssembler::evfp16ph(int opcode, XMMRegister dst, XMMRegister src1,
}
}
void C2_MacroAssembler::scalar_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2) {
vector_max_min_fp16(opcode, dst, src1, src2, ktmp, xtmp1, xtmp2, Assembler::AVX_128bit);
void C2_MacroAssembler::sminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2) {
vminmax_fp16(opcode, dst, src1, src2, ktmp, xtmp1, xtmp2, Assembler::AVX_128bit);
}
void C2_MacroAssembler::vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) {
void C2_MacroAssembler::sminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp) {
if (opcode == Op_MaxHF) {
// dst = max(src1, src2)
evminmaxsh(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
} else {
assert(opcode == Op_MinHF, "");
// dst = min(src1, src2)
evminmaxsh(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
}
}
void C2_MacroAssembler::vminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc) {
if (opcode == Op_MaxVHF || opcode == Op_MaxHF) {
// Move sign bits of src2 to mask register.
evpmovw2m(ktmp, src2, vlen_enc);
@ -7110,3 +7137,27 @@ void C2_MacroAssembler::vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegi
Assembler::evmovdquw(dst, ktmp, xtmp1, true, vlen_enc);
}
}
void C2_MacroAssembler::vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, int vlen_enc) {
if (opcode == Op_MaxVHF) {
// dst = max(src1, src2)
evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vlen_enc);
} else {
assert(opcode == Op_MinVHF, "");
// dst = min(src1, src2)
evminmaxph(dst, ktmp, src1, src2, true, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vlen_enc);
}
}
// Vector half-precision (Float16) min/max, register-memory form.
// opcode must be Op_MinVHF or Op_MaxVHF; emits evminmaxph to compute
// dst = min/max(src1, src2) lane-wise at the given vector length
// (vlen_enc). The only difference between the two opcodes is the
// compare-sign immediate, so select it up front.
void C2_MacroAssembler::vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, Address src2,
KRegister ktmp, int vlen_enc) {
assert(opcode == Op_MaxVHF || opcode == Op_MinVHF, "");
const int cmp_sign = (opcode == Op_MaxVHF) ? AVX10_2_MINMAX_MAX_COMPARE_SIGN
: AVX10_2_MINMAX_MIN_COMPARE_SIGN;
evminmaxph(dst, ktmp, src1, src2, true, cmp_sign, vlen_enc);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,8 +67,11 @@ public:
XMMRegister tmp, XMMRegister atmp, XMMRegister btmp,
int vlen_enc);
void vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2, int vlen_enc);
void vminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2, int vlen_enc);
void sminmax_fp_avx10_2(int opc, BasicType elem_bt, XMMRegister dst, KRegister mask,
XMMRegister src1, XMMRegister src2);
void vpuminmaxq(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc);
@ -576,11 +579,20 @@ public:
void evfp16ph(int opcode, XMMRegister dst, XMMRegister src1, Address src2, int vlen_enc);
void vector_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc);
void vminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2, int vlen_enc);
void scalar_max_min_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2);
void vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, int vlen_enc);
void vminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, Address src2,
KRegister ktmp, int vlen_enc);
void sminmax_fp16(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp, XMMRegister xtmp1, XMMRegister xtmp2);
void sminmax_fp16_avx10_2(int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2,
KRegister ktmp);
void reconstruct_frame_pointer(Register rtmp);

View File

@ -117,9 +117,6 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
product(bool, UseIncDec, true, DIAGNOSTIC, \
"Use INC, DEC instructions on x86") \
\
product(bool, UseNewLongLShift, false, \
"Use optimized bitwise shift left") \
\
product(bool, UseAddressNop, false, \
"Use '0F 1F [addr]' NOP instructions on x86 cpus") \
\

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1392,28 +1392,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
bool receiver_can_be_null) {
Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -243,8 +243,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
bool receiver_can_be_null = false);
void profile_virtual_call(Register receiver, Register mdp);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass);

View File

@ -985,12 +985,9 @@ int MacroAssembler::ic_check(int end_alignment) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(temp, receiver);
cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
} else if (UseCompressedClassPointers) {
} else {
movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
} else {
movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
}
// if inline cache check fails, then jump to runtime routine
@ -1961,6 +1958,16 @@ void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscrat
}
}
// Move a 16-bit half-float value from src to dst.
// When AVX10.2 is supported, a single evmovw moves it directly between the
// XMM registers; otherwise the value is bounced through the GPR rscratch
// with two evmovw instructions (presumably the direct xmm-to-xmm form
// requires AVX10.2 — confirm against the evmovw encodings).
void MacroAssembler::movhlf(XMMRegister dst, XMMRegister src, Register rscratch) {
if (VM_Version::supports_avx10_2()) {
evmovw(dst, src);
} else {
// The GPR round-trip path needs a real scratch register.
assert(rscratch != noreg, "missing");
evmovw(rscratch, src);
evmovw(dst, rscratch);
}
}
void MacroAssembler::mov64(Register dst, int64_t imm64) {
if (is_uimm32(imm64)) {
movl(dst, checked_cast<uint32_t>(imm64));
@ -2664,14 +2671,14 @@ void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscra
}
}
void MacroAssembler::vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
void MacroAssembler::evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
Assembler::vucomxsd(dst, as_Address(src));
Assembler::evucomxsd(dst, as_Address(src));
} else {
lea(rscratch, src);
Assembler::vucomxsd(dst, Address(rscratch, 0));
Assembler::evucomxsd(dst, Address(rscratch, 0));
}
}
@ -2686,14 +2693,36 @@ void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscra
}
}
void MacroAssembler::vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
// AddressLiteral flavor of Assembler::evucomxss (unordered scalar-float
// compare). If the literal is reachable it is used as a direct memory
// operand; otherwise its address is materialized in rscratch first.
void MacroAssembler::evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
Assembler::evucomxss(dst, as_Address(src));
} else {
lea(rscratch, src);
Assembler::evucomxss(dst, Address(rscratch, 0));
}
}
// AddressLiteral flavor of Assembler::evucomish (unordered scalar
// half-float compare). If the literal is reachable it is used as a direct
// memory operand; otherwise its address is materialized in rscratch first.
void MacroAssembler::evucomish(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
Assembler::evucomish(dst, as_Address(src));
} else {
lea(rscratch, src);
Assembler::evucomish(dst, Address(rscratch, 0));
}
}
// AddressLiteral flavor of Assembler::evucomxsh (unordered scalar
// half-float compare, extended form). If the literal is reachable it is
// used as a direct memory operand; otherwise its address is materialized
// in rscratch first.
void MacroAssembler::evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
Assembler::evucomxsh(dst, as_Address(src));
} else {
lea(rscratch, src);
Assembler::evucomxsh(dst, Address(rscratch, 0));
}
}
@ -5384,11 +5413,9 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
decode_klass_not_null(dst, tmp);
} else if (UseCompressedClassPointers) {
} else {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst, tmp);
} else {
movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
}
@ -5396,12 +5423,8 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
assert(!UseCompactObjectHeaders, "not with compact headers");
assert_different_registers(src, tmp);
assert_different_registers(dst, tmp);
if (UseCompressedClassPointers) {
encode_klass_not_null(src, tmp);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else {
movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
encode_klass_not_null(src, tmp);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
@ -5410,10 +5433,8 @@ void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) {
assert_different_registers(klass, obj, tmp);
load_narrow_klass_compact(tmp, obj);
cmpl(klass, tmp);
} else if (UseCompressedClassPointers) {
cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
}
@ -5424,12 +5445,9 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
cmpl(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
} else {
movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes()));
}
}
@ -5478,10 +5496,8 @@ void MacroAssembler::store_heap_oop_null(Address dst) {
void MacroAssembler::store_klass_gap(Register dst, Register src) {
assert(!UseCompactObjectHeaders, "Don't use with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
}
// Store to klass gap in destination
movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
}
#ifdef ASSERT
@ -5671,7 +5687,6 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
BLOCK_COMMENT("decode_klass_not_null {");
assert_different_registers(r, tmp);
// Note: it will change flags
assert(UseCompressedClassPointers, "should only be used for compressed headers");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
@ -5693,7 +5708,6 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src)
BLOCK_COMMENT("decode_and_move_klass_not_null {");
assert_different_registers(src, dst);
// Note: it will change flags
assert (UseCompressedClassPointers, "should only be used for compressed headers");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
@ -5750,7 +5764,6 @@ void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5758,7 +5771,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
}
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5784,7 +5796,6 @@ void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
}
void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5792,7 +5803,6 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
}
void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -9185,7 +9195,7 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM
case T_FLOAT:
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
case T_DOUBLE:
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
default:
fatal("Unexpected type argument %s", type2name(type)); break;
}

View File

@ -162,6 +162,8 @@ class MacroAssembler: public Assembler {
void incrementq(AddressLiteral dst, Register rscratch = noreg);
void movhlf(XMMRegister dst, XMMRegister src, Register rscratch = noreg);
// Support optimal SSE move instructions.
void movflt(XMMRegister dst, XMMRegister src) {
if (dst-> encoding() == src->encoding()) return;
@ -351,8 +353,7 @@ class MacroAssembler: public Assembler {
void load_klass(Register dst, Register src, Register tmp);
void store_klass(Register dst, Register src, Register tmp);
// Compares the Klass pointer of an object to a given Klass (which might be narrow,
// depending on UseCompressedClassPointers).
// Compares the narrow Klass pointer of an object to a given narrow Klass.
void cmp_klass(Register klass, Register obj, Register tmp);
// Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
@ -1309,21 +1310,29 @@ public:
void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void evucomish(XMMRegister dst, XMMRegister src) { Assembler::evucomish(dst, src); }
void evucomish(XMMRegister dst, Address src) { Assembler::evucomish(dst, src); }
void evucomish(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void evucomxsh(XMMRegister dst, XMMRegister src) { Assembler::evucomxsh(dst, src); }
void evucomxsh(XMMRegister dst, Address src) { Assembler::evucomxsh(dst, src); }
void evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void vucomxss(XMMRegister dst, XMMRegister src) { Assembler::vucomxss(dst, src); }
void vucomxss(XMMRegister dst, Address src) { Assembler::vucomxss(dst, src); }
void vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void evucomxss(XMMRegister dst, XMMRegister src) { Assembler::evucomxss(dst, src); }
void evucomxss(XMMRegister dst, Address src) { Assembler::evucomxss(dst, src); }
void evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void vucomxsd(XMMRegister dst, XMMRegister src) { Assembler::vucomxsd(dst, src); }
void vucomxsd(XMMRegister dst, Address src) { Assembler::vucomxsd(dst, src); }
void vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
void evucomxsd(XMMRegister dst, XMMRegister src) { Assembler::evucomxsd(dst, src); }
void evucomxsd(XMMRegister dst, Address src) { Assembler::evucomxsd(dst, src); }
void evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@
}
static bool narrow_klass_use_complex_address() {
assert(UseCompressedClassPointers, "only for compressed klass code");
return (CompressedKlassPointers::shift() <= 3);
}

View File

@ -1508,9 +1508,6 @@ void VM_Version::get_processor_features() {
MaxLoopPad = 11;
}
#endif // COMPILER2
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
}
if (supports_sse4_2()) { // new ZX cpus
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
@ -1528,10 +1525,6 @@ void VM_Version::get_processor_features() {
// Use it on new AMD cpus starting from Opteron.
UseAddressNop = true;
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
// Use it on new AMD cpus starting from Opteron.
UseNewLongLShift = true;
}
if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
if (supports_sse4a()) {
UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
@ -1571,10 +1564,6 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
// On family 15h processors use XMM and UnalignedLoadStores for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
@ -1591,9 +1580,6 @@ void VM_Version::get_processor_features() {
if (cpu_family() >= 0x17) {
// On family >=17h processors use XMM and UnalignedLoadStores
// for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
@ -1640,9 +1626,6 @@ void VM_Version::get_processor_features() {
}
#endif // COMPILER2
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
}
if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus

View File

@ -1708,84 +1708,99 @@ static void emit_cmpfp3(MacroAssembler* masm, Register dst) {
__ bind(done);
}
// Math.min() # Math.max()
// --------------------------
// ucomis[s/d] #
// ja -> b # a
// jp -> NaN # NaN
// jb -> a # b
// je #
// |-jz -> a | b # a & b
// | -> a #
// Floating-point precision selector shared by the scalar min/max helpers
// below (emit_fp_ucom / movfp): they pick the half-, single-, or
// double-precision instruction variant based on this tag.
enum FP_PREC {
fp_prec_hlf, // half precision (Float16)
fp_prec_flt, // single precision (float)
fp_prec_dbl // double precision (double)
};
// Emit an unordered floating-point compare of p and q in the precision
// selected by pt: evucomish for half, ucomiss for single, ucomisd for
// double. Sets the CPU flags consumed by the min/max branch sequence.
static inline void emit_fp_ucom(MacroAssembler* masm, enum FP_PREC pt,
XMMRegister p, XMMRegister q) {
switch (pt) {
case fp_prec_hlf: __ evucomish(p, q); break;
case fp_prec_flt: __ ucomiss(p, q); break;
default: __ ucomisd(p, q); break;
}
}
// Emit a scalar floating-point register move from src to dst in the
// precision selected by pt. Only the half-precision variant (movhlf) may
// need the GPR scratch register.
static inline void movfp(MacroAssembler* masm, enum FP_PREC pt,
XMMRegister dst, XMMRegister src, Register scratch) {
switch (pt) {
case fp_prec_hlf: __ movhlf(dst, src, scratch); break;
case fp_prec_flt: __ movflt(dst, src); break;
default: __ movdbl(dst, src); break;
}
}
// Math.min() # Math.max()
// -----------------------------
// (v)ucomis[h/s/d] #
// ja -> b # a
// jp -> NaN # NaN
// jb -> a # b
// je #
// |-jz -> a | b # a & b
// | -> a #
static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst,
XMMRegister a, XMMRegister b,
XMMRegister xmmt, Register rt,
bool min, bool single) {
bool min, enum FP_PREC pt) {
Label nan, zero, below, above, done;
if (single)
__ ucomiss(a, b);
else
__ ucomisd(a, b);
emit_fp_ucom(masm, pt, a, b);
if (dst->encoding() != (min ? b : a)->encoding())
if (dst->encoding() != (min ? b : a)->encoding()) {
__ jccb(Assembler::above, above); // CF=0 & ZF=0
else
} else {
__ jccb(Assembler::above, done);
}
__ jccb(Assembler::parity, nan); // PF=1
__ jccb(Assembler::below, below); // CF=1
// equal
__ vpxor(xmmt, xmmt, xmmt, Assembler::AVX_128bit);
if (single) {
__ ucomiss(a, xmmt);
__ jccb(Assembler::equal, zero);
emit_fp_ucom(masm, pt, a, xmmt);
__ movflt(dst, a);
__ jmp(done);
}
else {
__ ucomisd(a, xmmt);
__ jccb(Assembler::equal, zero);
__ jccb(Assembler::equal, zero);
movfp(masm, pt, dst, a, rt);
__ movdbl(dst, a);
__ jmp(done);
}
__ jmp(done);
__ bind(zero);
if (min)
if (min) {
__ vpor(dst, a, b, Assembler::AVX_128bit);
else
} else {
__ vpand(dst, a, b, Assembler::AVX_128bit);
}
__ jmp(done);
__ bind(above);
if (single)
__ movflt(dst, min ? b : a);
else
__ movdbl(dst, min ? b : a);
movfp(masm, pt, dst, min ? b : a, rt);
__ jmp(done);
__ bind(nan);
if (single) {
if (pt == fp_prec_hlf) {
__ movl(rt, 0x00007e00); // Float16.NaN
__ evmovw(dst, rt);
} else if (pt == fp_prec_flt) {
__ movl(rt, 0x7fc00000); // Float.NaN
__ movdl(dst, rt);
}
else {
} else {
__ mov64(rt, 0x7ff8000000000000L); // Double.NaN
__ movdq(dst, rt);
}
__ jmp(done);
__ bind(below);
if (single)
__ movflt(dst, min ? a : b);
else
__ movdbl(dst, min ? a : b);
movfp(masm, pt, dst, min ? a : b, rt);
__ bind(done);
}
@ -2605,13 +2620,8 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
} else {
st->print_cr("movq rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tcmpq rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
}
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
}
#endif
@ -2763,13 +2773,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? default_float_pressure_threshold : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// In 64-bit mode, code that uses multiply when the
// divisor is constant is faster than the hardware
// DIV instruction (it uses MulHiL).
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
return INT_RAX_REG_mask();
@ -7357,146 +7360,140 @@ instruct loadAOTRCAddress(rRegP dst, immAOTRuntimeConstantsAddress con)
ins_pipe(ialu_reg_fat);
%}
// min = java.lang.Math.min(float a, float b)
// max = java.lang.Math.max(float a, float b)
instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());
instruct minmaxF_reg_avx10_2(regF dst, regF a, regF b)
%{
predicate(VM_Version::supports_avx10_2() && !VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
format %{ "maxF $dst, $a, $b" %}
ins_encode %{
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
// max = java.lang.Math.max(float a, float b)
instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "maxF $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
ins_encode %{
__ vminmax_fp(Op_MaxV, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
%}
ins_pipe( pipe_slow );
%}
instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "maxF_reduction $dst, $a, $b \t!using $xtmp and $rtmp as TEMP" %}
ins_encode %{
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
false /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
%}
// max = java.lang.Math.max(double a, double b)
instruct maxD_reg_avx10_2(regD dst, regD a, regD b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MaxD a b));
format %{ "maxD $dst, $a, $b" %}
ins_encode %{
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
// max = java.lang.Math.max(double a, double b)
instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp);
format %{ "maxD $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
ins_encode %{
__ vminmax_fp(Op_MaxV, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
%}
ins_pipe( pipe_slow );
%}
instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "maxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
ins_encode %{
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
false /*min*/, false /*single*/);
%}
ins_pipe( pipe_slow );
%}
// min = java.lang.Math.min(float a, float b)
instruct minF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinF a b));
format %{ "minF $dst, $a, $b" %}
format %{ "minmaxF $dst, $a, $b" %}
ins_encode %{
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
int opcode = this->ideal_Opcode();
__ sminmax_fp_avx10_2(opcode, T_FLOAT, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
// Scalar float Math.min/Math.max when the node is part of a reduction, on
// AVX10.2 hardware. Dispatches MinF vs. MaxF from the ideal opcode and
// emits the branchy compare sequence via emit_fp_min_max, which needs an
// XMM temp, a GPR temp, and clobbers the flags.
instruct minmaxF_reduction_reg_avx10_2(regF dst, regF a, regF b, regF xtmp, rRegI rtmp, rFlagsReg cr)
%{
predicate(VM_Version::supports_avx10_2() && VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "minmaxF_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
bool min = (opcode == Op_MinF) ? true : false;
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
min, fp_prec_flt /*pt*/);
%}
ins_pipe( pipe_slow );
%}
// min = java.lang.Math.min(float a, float b)
instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{
// max = java.lang.Math.max(float a, float b)
instruct minmaxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp)
%{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "minF $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
format %{ "minmaxF $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
ins_encode %{
__ vminmax_fp(Op_MinV, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
int opcode = this->ideal_Opcode();
int param_opcode = (opcode == Op_MinF) ? Op_MinV : Op_MaxV;
__ vminmax_fp(param_opcode, T_FLOAT, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister,
$atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
%}
ins_pipe( pipe_slow );
%}
instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr) %{
instruct minmaxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRegI rtmp, rFlagsReg cr)
%{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "minF_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
format %{ "minmaxF_reduction $dst, $a, $b \t!using $xtmp and $rtmp as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
bool min = (opcode == Op_MinF) ? true : false;
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
true /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
%}
// min = java.lang.Math.min(double a, double b)
instruct minD_reg_avx10_2(regD dst, regD a, regD b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinD a b));
format %{ "minD $dst, $a, $b" %}
ins_encode %{
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
min, fp_prec_flt /*pt*/);
%}
ins_pipe( pipe_slow );
%}
// min = java.lang.Math.min(double a, double b)
instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n));
// max = java.lang.Math.max(double a, double b)
instruct minmaxD_reg_avx10_2(regD dst, regD a, regD b)
%{
predicate(VM_Version::supports_avx10_2() && !VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "minD $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
format %{ "minmaxD $dst, $a, $b" %}
ins_encode %{
__ vminmax_fp(Op_MinV, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister, $atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
int opcode = this->ideal_Opcode();
__ sminmax_fp_avx10_2(opcode, T_DOUBLE, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr) %{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n));
instruct minmaxD_reduction_reg_avx10_2(regD dst, regD a, regD b, regD xtmp, rRegI rtmp, rFlagsReg cr)
%{
predicate(VM_Version::supports_avx10_2() && VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "maxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
format %{ "minmaxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
bool min = (opcode == Op_MinD) ? true : false;
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
true /*min*/, false /*single*/);
min, fp_prec_dbl /*pt*/);
%}
ins_pipe( pipe_slow );
%}
// min = java.lang.Math.min(double a, double b)
// max = java.lang.Math.max(double a, double b)
// Scalar double Math.min/Math.max outside a reduction, pre-AVX10.2 (AVX1+).
// Dispatches MinD vs. MaxD from the ideal opcode onto the vector helper
// vminmax_fp (Min/MaxV at 128-bit length), which needs three XMM temps.
instruct minmaxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp)
%{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp);
format %{ "minmaxD $dst, $a, $b \t! using $tmp, $atmp and $btmp as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
int param_opcode = (opcode == Op_MinD) ? Op_MinV : Op_MaxV;
__ vminmax_fp(param_opcode, T_DOUBLE, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $tmp$$XMMRegister,
$atmp$$XMMRegister, $btmp$$XMMRegister, Assembler::AVX_128bit);
%}
ins_pipe( pipe_slow );
%}
instruct minmaxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRegL rtmp, rFlagsReg cr)
%{
predicate(!VM_Version::supports_avx10_2() && UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP xtmp, TEMP rtmp, KILL cr);
format %{ "minmaxD_reduction $dst, $a, $b \t! using $xtmp and $rtmp as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
bool min = (opcode == Op_MinD) ? true : false;
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xtmp$$XMMRegister, $rtmp$$Register,
min, fp_prec_dbl /*pt*/);
%}
ins_pipe( pipe_slow );
%}
@ -14406,9 +14403,9 @@ instruct cmpF_cc_regCFE(rFlagsRegUCFE cr, regF src1, regF src2) %{
match(Set cr (CmpF src1 src2));
ins_cost(100);
format %{ "vucomxss $src1, $src2" %}
format %{ "evucomxss $src1, $src2" %}
ins_encode %{
__ vucomxss($src1$$XMMRegister, $src2$$XMMRegister);
__ evucomxss($src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
@ -14428,9 +14425,9 @@ instruct cmpF_cc_memCFE(rFlagsRegUCFE cr, regF src1, memory src2) %{
match(Set cr (CmpF src1 (LoadF src2)));
ins_cost(100);
format %{ "vucomxss $src1, $src2" %}
format %{ "evucomxss $src1, $src2" %}
ins_encode %{
__ vucomxss($src1$$XMMRegister, $src2$$Address);
__ evucomxss($src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
@ -14450,9 +14447,9 @@ instruct cmpF_cc_immCFE(rFlagsRegUCFE cr, regF src, immF con) %{
match(Set cr (CmpF src con));
ins_cost(100);
format %{ "vucomxss $src, [$constantaddress]\t# load from constant table: float=$con" %}
format %{ "evucomxss $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ vucomxss($src$$XMMRegister, $constantaddress($con));
__ evucomxss($src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
@ -14491,9 +14488,9 @@ instruct cmpD_cc_regCFE(rFlagsRegUCFE cr, regD src1, regD src2) %{
match(Set cr (CmpD src1 src2));
ins_cost(100);
format %{ "vucomxsd $src1, $src2 test" %}
format %{ "evucomxsd $src1, $src2 test" %}
ins_encode %{
__ vucomxsd($src1$$XMMRegister, $src2$$XMMRegister);
__ evucomxsd($src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
@ -14513,9 +14510,9 @@ instruct cmpD_cc_memCFE(rFlagsRegUCFE cr, regD src1, memory src2) %{
match(Set cr (CmpD src1 (LoadD src2)));
ins_cost(100);
format %{ "vucomxsd $src1, $src2" %}
format %{ "evucomxsd $src1, $src2" %}
ins_encode %{
__ vucomxsd($src1$$XMMRegister, $src2$$Address);
__ evucomxsd($src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
@ -14534,9 +14531,9 @@ instruct cmpD_cc_immCFE(rFlagsRegUCFE cr, regD src, immD con) %{
match(Set cr (CmpD src con));
ins_cost(100);
format %{ "vucomxsd $src, [$constantaddress]\t# load from constant table: double=$con" %}
format %{ "evucomxsd $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ vucomxsd($src$$XMMRegister, $constantaddress($con));
__ evucomxsd($src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
@ -18844,7 +18841,7 @@ instruct ReplHF_reg(vec dst, regF src, rRegI rtmp) %{
format %{ "replicateHF $dst, $src \t! using $rtmp as TEMP" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
__ vmovw($rtmp$$Register, $src$$XMMRegister);
__ evmovw($rtmp$$Register, $src$$XMMRegister);
__ evpbroadcastw($dst$$XMMRegister, $rtmp$$Register, vlen_enc);
%}
ins_pipe( pipe_slow );
@ -20959,7 +20956,7 @@ instruct minmaxFP_reg_avx10_2(vec dst, vec a, vec b) %{
int vlen_enc = vector_length_encoding(this);
int opcode = this->ideal_Opcode();
BasicType elem_bt = Matcher::vector_element_basic_type(this);
__ vminmax_fp(opcode, elem_bt, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister, vlen_enc);
__ vminmax_fp_avx10_2(opcode, elem_bt, $dst$$XMMRegister, k0, $a$$XMMRegister, $b$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -23975,8 +23972,12 @@ instruct vmask_gen_imm(kReg dst, immL len, rRegL temp) %{
format %{ "vector_mask_gen $len \t! vector mask generator" %}
effect(TEMP temp);
ins_encode %{
__ mov64($temp$$Register, (0xFFFFFFFFFFFFFFFFUL >> (64 -$len$$constant)));
__ kmovql($dst$$KRegister, $temp$$Register);
if ($len$$constant > 0) {
__ mov64($temp$$Register, right_n_bits($len$$constant));
__ kmovql($dst$$KRegister, $temp$$Register);
} else {
__ kxorql($dst$$KRegister, $dst$$KRegister, $dst$$KRegister);
}
%}
ins_pipe( pipe_slow );
%}
@ -25303,9 +25304,9 @@ instruct vector_selectfrom_twovectors_reg_evex(vec index, vec src1, vec src2)
instruct reinterpretS2HF(regF dst, rRegI src)
%{
match(Set dst (ReinterpretS2HF src));
format %{ "vmovw $dst, $src" %}
format %{ "evmovw $dst, $src" %}
ins_encode %{
__ vmovw($dst$$XMMRegister, $src$$Register);
__ evmovw($dst$$XMMRegister, $src$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -25313,9 +25314,9 @@ instruct reinterpretS2HF(regF dst, rRegI src)
instruct reinterpretHF2S(rRegI dst, regF src)
%{
match(Set dst (ReinterpretHF2S src));
format %{ "vmovw $dst, $src" %}
format %{ "evmovw $dst, $src" %}
ins_encode %{
__ vmovw($dst$$Register, $src$$XMMRegister);
__ evmovw($dst$$Register, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
@ -25369,10 +25370,11 @@ instruct scalar_minmax_HF_reg_avx10_2(regF dst, regF src1, regF src2)
predicate(VM_Version::supports_avx10_2());
match(Set dst (MaxHF src1 src2));
match(Set dst (MinHF src1 src2));
format %{ "scalar_min_max_fp16 $dst, $src1, $src2" %}
ins_encode %{
int function = this->ideal_Opcode() == Op_MinHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ eminmaxsh($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, function);
int opcode = this->ideal_Opcode();
__ sminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, k0);
%}
ins_pipe( pipe_slow );
%}
@ -25383,11 +25385,12 @@ instruct scalar_minmax_HF_reg(regF dst, regF src1, regF src2, kReg ktmp, regF xt
match(Set dst (MaxHF src1 src2));
match(Set dst (MinHF src1 src2));
effect(TEMP_DEF dst, TEMP ktmp, TEMP xtmp1, TEMP xtmp2);
format %{ "scalar_min_max_fp16 $dst, $src1, $src2\t using $ktmp, $xtmp1 and $xtmp2 as TEMP" %}
ins_encode %{
int opcode = this->ideal_Opcode();
__ scalar_max_min_fp16(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $ktmp$$KRegister,
$xtmp1$$XMMRegister, $xtmp2$$XMMRegister);
__ sminmax_fp16(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $ktmp$$KRegister,
$xtmp1$$XMMRegister, $xtmp2$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
@ -25487,8 +25490,9 @@ instruct vector_minmax_HF_mem_avx10_2(vec dst, vec src1, memory src2)
format %{ "vector_min_max_fp16_mem $dst, $src1, $src2" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$Address, true, function, vlen_enc);
int opcode = this->ideal_Opcode();
__ vminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address,
k0, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -25501,8 +25505,9 @@ instruct vector_minmax_HF_reg_avx10_2(vec dst, vec src1, vec src2)
format %{ "vector_min_max_fp16 $dst, $src1, $src2" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$XMMRegister, true, function, vlen_enc);
int opcode = this->ideal_Opcode();
__ vminmax_fp16_avx10_2(opcode, $dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister,
k0, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -25517,8 +25522,8 @@ instruct vector_minmax_HF_reg(vec dst, vec src1, vec src2, kReg ktmp, vec xtmp1,
ins_encode %{
int vlen_enc = vector_length_encoding(this);
int opcode = this->ideal_Opcode();
__ vector_max_min_fp16(opcode, $dst$$XMMRegister, $src2$$XMMRegister, $src1$$XMMRegister, $ktmp$$KRegister,
$xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc);
__ vminmax_fp16(opcode, $dst$$XMMRegister, $src2$$XMMRegister, $src1$$XMMRegister, $ktmp$$KRegister,
$xtmp1$$XMMRegister, $xtmp2$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}

View File

@ -35,16 +35,16 @@
#include <dirent.h>
ExplicitHugePageSupport::ExplicitHugePageSupport() :
_initialized(false), _pagesizes(), _pre_allocated_pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {}
_initialized{false}, _os_supported{}, _pre_allocated{}, _default_hugepage_size{0}, _inconsistent{false} {}
os::PageSizes ExplicitHugePageSupport::pagesizes() const {
os::PageSizes ExplicitHugePageSupport::os_supported() const {
assert(_initialized, "Not initialized");
return _pagesizes;
return _os_supported;
}
os::PageSizes ExplicitHugePageSupport::pre_allocated_pagesizes() const {
os::PageSizes ExplicitHugePageSupport::pre_allocated() const {
assert(_initialized, "Not initialized");
return _pre_allocated_pagesizes;
return _pre_allocated;
}
size_t ExplicitHugePageSupport::default_hugepage_size() const {
@ -68,7 +68,7 @@ static size_t scan_default_hugepagesize() {
// format has been changed), we'll set largest page size to 0
FILE *fp = os::fopen("/proc/meminfo", "r");
if (fp) {
if (fp != nullptr) {
while (!feof(fp)) {
int x = 0;
char buf[16];
@ -81,7 +81,7 @@ static size_t scan_default_hugepagesize() {
// skip to next line
for (;;) {
int ch = fgetc(fp);
if (ch == EOF || ch == (int)'\n') break;
if (ch == EOF || ch == '\n') break;
}
}
}
@ -151,7 +151,7 @@ static os::PageSizes filter_pre_allocated_hugepages(os::PageSizes pagesizes) {
void ExplicitHugePageSupport::print_on(outputStream* os) {
if (_initialized) {
os->print_cr("Explicit hugepage support:");
for (size_t s = _pagesizes.smallest(); s != 0; s = _pagesizes.next_larger(s)) {
for (size_t s = _os_supported.smallest(); s != 0; s = _os_supported.next_larger(s)) {
os->print_cr(" hugepage size: " EXACTFMT, EXACTFMTARGS(s));
}
os->print_cr(" default hugepage size: " EXACTFMT, EXACTFMTARGS(_default_hugepage_size));
@ -166,13 +166,13 @@ void ExplicitHugePageSupport::print_on(outputStream* os) {
void ExplicitHugePageSupport::scan_os() {
_default_hugepage_size = scan_default_hugepagesize();
if (_default_hugepage_size > 0) {
_pagesizes = scan_hugepages();
_pre_allocated_pagesizes = filter_pre_allocated_hugepages(_pagesizes);
_os_supported = scan_hugepages();
_pre_allocated = filter_pre_allocated_hugepages(_os_supported);
// See https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt: /proc/meminfo should match
// /sys/kernel/mm/hugepages/hugepages-xxxx. However, we may run on a broken kernel (e.g. on WSL)
// that only exposes /proc/meminfo but not /sys/kernel/mm/hugepages. In that case, we are not
// sure about the state of hugepage support by the kernel, so we won't use explicit hugepages.
if (!_pagesizes.contains(_default_hugepage_size)) {
if (!_os_supported.contains(_default_hugepage_size)) {
log_info(pagesize)("Unexpected configuration: default pagesize (%zu) "
"has no associated directory in /sys/kernel/mm/hugepages.", _default_hugepage_size);
_inconsistent = true;
@ -187,7 +187,7 @@ void ExplicitHugePageSupport::scan_os() {
}
THPSupport::THPSupport() :
_initialized(false), _mode(THPMode::never), _pagesize(SIZE_MAX) {}
_initialized{false}, _mode{THPMode::never}, _pagesize{0} {}
THPMode THPSupport::mode() const {
@ -221,7 +221,6 @@ void THPSupport::scan_os() {
}
// Scan large page size for THP from hpage_pmd_size
_pagesize = 0;
if (read_number_file("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", &_pagesize)) {
assert(_pagesize > 0, "Expected");
}

View File

@ -45,10 +45,10 @@ class ExplicitHugePageSupport {
// All supported hugepage sizes (sizes for which entries exist
// in /sys/kernel/mm/hugepages/hugepage-xxx)
os::PageSizes _pagesizes;
os::PageSizes _os_supported;
// Above pages filtered for where the contents of file nr_hugepages was larger than zero
os::PageSizes _pre_allocated_pagesizes;
os::PageSizes _pre_allocated;
// Contains the default hugepage. The "default hugepage size" is the one that
// - is marked in /proc/meminfo as "Hugepagesize"
@ -63,8 +63,8 @@ public:
void scan_os();
os::PageSizes pagesizes() const;
os::PageSizes pre_allocated_pagesizes() const;
os::PageSizes os_supported() const;
os::PageSizes pre_allocated() const;
size_t default_hugepage_size() const;
void print_on(outputStream* os);

View File

@ -1313,7 +1313,7 @@ bool os::is_primordial_thread(void) {
// Find the virtual memory area that contains addr
static bool find_vma(address addr, address* vma_low, address* vma_high) {
FILE *fp = os::fopen("/proc/self/maps", "r");
if (fp) {
if (fp != nullptr) {
address low, high;
while (!feof(fp)) {
if (fscanf(fp, "%p-%p", &low, &high) == 2) {
@ -1326,7 +1326,7 @@ static bool find_vma(address addr, address* vma_low, address* vma_high) {
}
for (;;) {
int ch = fgetc(fp);
if (ch == EOF || ch == (int)'\n') break;
if (ch == EOF || ch == '\n') break;
}
}
fclose(fp);
@ -3818,8 +3818,8 @@ static int hugetlbfs_page_size_flag(size_t page_size) {
}
static bool hugetlbfs_sanity_check(size_t page_size) {
const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes();
assert(page_sizes.contains(page_size), "Invalid page sizes passed (%zu)", page_size);
const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported();
assert(os_supported.contains(page_size), "Invalid page sizes passed (%zu)", page_size);
// Include the page size flag to ensure we sanity check the correct page size.
int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
@ -3833,16 +3833,16 @@ static bool hugetlbfs_sanity_check(size_t page_size) {
log_info(pagesize)("Large page size (" EXACTFMT ") failed sanity check, "
"checking if smaller large page sizes are usable",
EXACTFMTARGS(page_size));
for (size_t page_size_ = page_sizes.next_smaller(page_size);
page_size_ > os::vm_page_size();
page_size_ = page_sizes.next_smaller(page_size_)) {
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
for (size_t size = os_supported.next_smaller(page_size);
size > os::vm_page_size();
size = os_supported.next_smaller(size)) {
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(size);
p = mmap(nullptr, size, PROT_READ|PROT_WRITE, flags, -1, 0);
if (p != MAP_FAILED) {
// Mapping succeeded, sanity check passed.
munmap(p, page_size_);
munmap(p, size);
log_info(pagesize)("Large page size (" EXACTFMT ") passed sanity check",
EXACTFMTARGS(page_size_));
EXACTFMTARGS(size));
return true;
}
}
@ -4024,7 +4024,7 @@ void os::Linux::large_page_init() {
// - os::large_page_size() is the default explicit hugepage size (/proc/meminfo "Hugepagesize")
// - os::pagesizes() contains all hugepage sizes the kernel supports, regardless whether there
// are pages configured in the pool or not (from /sys/kernel/hugepages/hugepage-xxxx ...)
os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().pagesizes();
os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().os_supported();
const size_t default_large_page_size = HugePages::default_explicit_hugepage_size();
// 3) Consistency check and post-processing
@ -4068,7 +4068,7 @@ void os::Linux::large_page_init() {
// Populate _page_sizes with _large_page_size (default large page size) even if not pre-allocated.
// Then, populate _page_sizes with all smaller large page sizes that have been pre-allocated.
os::PageSizes pre_allocated = HugePages::explicit_hugepage_info().pre_allocated_pagesizes();
os::PageSizes pre_allocated = HugePages::explicit_hugepage_info().pre_allocated();
for (size_t page_size = _large_page_size; page_size != 0; page_size = pre_allocated.next_smaller(page_size)) {
_page_sizes.add(page_size);
}
@ -4133,12 +4133,12 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes,
size_t page_size,
char* req_addr,
bool exec) {
const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes();
const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported();
assert(UseLargePages, "only for Huge TLBFS large pages");
assert(is_aligned(req_addr, alignment), "Must be");
assert(is_aligned(req_addr, page_size), "Must be");
assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
assert(page_sizes.contains(page_size), "Must be a valid page size");
assert(os_supported.contains(page_size), "Must be a valid page size");
assert(page_size > os::vm_page_size(), "Must be a large page size");
assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");
@ -4384,7 +4384,7 @@ int os::Linux::get_namespace_pid(int vmid) {
os::snprintf_checked(fname, sizeof(fname), "/proc/%d/status", vmid);
FILE *fp = os::fopen(fname, "r");
if (fp) {
if (fp != nullptr) {
int pid, nspid;
int ret;
while (!feof(fp) && !ferror(fp)) {
@ -4398,7 +4398,7 @@ int os::Linux::get_namespace_pid(int vmid) {
}
for (;;) {
int ch = fgetc(fp);
if (ch == EOF || ch == (int)'\n') break;
if (ch == EOF || ch == '\n') break;
}
}
fclose(fp);
@ -5461,5 +5461,3 @@ void os::print_open_file_descriptors(outputStream* st) {
st->print_cr("Open File Descriptors: %d", fds);
}
}

View File

@ -888,6 +888,14 @@ void* os::lookup_function(const char* name) {
return dlsym(RTLD_DEFAULT, name);
}
int64_t os::ftell(FILE* file) {
return ::ftell(file);
}
int os::fseek(FILE* file, int64_t offset, int whence) {
return ::fseek(file, offset, whence);
}
jlong os::lseek(int fd, jlong offset, int whence) {
return (jlong) ::lseek(fd, offset, whence);
}

View File

@ -5114,6 +5114,13 @@ jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
int64_t os::ftell(FILE* file) {
return ::_ftelli64(file);
}
int os::fseek(FILE* file, int64_t offset, int whence) {
return ::_fseeki64(file,offset, whence);
}
jlong os::lseek(int fd, jlong offset, int whence) {
return (jlong) ::_lseeki64(fd, offset, whence);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,7 +27,6 @@
#define OS_CPU_BSD_ZERO_ATOMICACCESS_BSD_ZERO_HPP
#include "orderAccess_bsd_zero.hpp"
#include "runtime/os.hpp"
// Implementation of class AtomicAccess

View File

@ -36,40 +36,42 @@
#include <sys/auxv.h>
#include <sys/prctl.h>
static constexpr uint64_t feature_bit(int n) { return nth_bit<uint64_t>(n); }
#ifndef HWCAP_ISA_I
#define HWCAP_ISA_I nth_bit('I' - 'A')
#define HWCAP_ISA_I feature_bit('I' - 'A')
#endif
#ifndef HWCAP_ISA_M
#define HWCAP_ISA_M nth_bit('M' - 'A')
#define HWCAP_ISA_M feature_bit('M' - 'A')
#endif
#ifndef HWCAP_ISA_A
#define HWCAP_ISA_A nth_bit('A' - 'A')
#define HWCAP_ISA_A feature_bit('A' - 'A')
#endif
#ifndef HWCAP_ISA_F
#define HWCAP_ISA_F nth_bit('F' - 'A')
#define HWCAP_ISA_F feature_bit('F' - 'A')
#endif
#ifndef HWCAP_ISA_D
#define HWCAP_ISA_D nth_bit('D' - 'A')
#define HWCAP_ISA_D feature_bit('D' - 'A')
#endif
#ifndef HWCAP_ISA_C
#define HWCAP_ISA_C nth_bit('C' - 'A')
#define HWCAP_ISA_C feature_bit('C' - 'A')
#endif
#ifndef HWCAP_ISA_Q
#define HWCAP_ISA_Q nth_bit('Q' - 'A')
#define HWCAP_ISA_Q feature_bit('Q' - 'A')
#endif
#ifndef HWCAP_ISA_H
#define HWCAP_ISA_H nth_bit('H' - 'A')
#define HWCAP_ISA_H feature_bit('H' - 'A')
#endif
#ifndef HWCAP_ISA_V
#define HWCAP_ISA_V nth_bit('V' - 'A')
#define HWCAP_ISA_V feature_bit('V' - 'A')
#endif
#define read_csr(csr) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "cds/lambdaProxyClassDictionary.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/instanceKlass.hpp"
@ -169,6 +170,7 @@ void AOTArtifactFinder::find_artifacts() {
end_scanning_for_oops();
TrainingData::cleanup_training_data();
check_critical_classes();
}
void AOTArtifactFinder::start_scanning_for_oops() {
@ -233,6 +235,7 @@ void AOTArtifactFinder::add_cached_instance_class(InstanceKlass* ik) {
bool created;
_seen_classes->put_if_absent(ik, &created);
if (created) {
check_critical_class(ik);
append_to_all_cached_classes(ik);
// All super types must be added.
@ -310,3 +313,25 @@ void AOTArtifactFinder::all_cached_classes_do(MetaspaceClosure* it) {
it->push(_all_cached_classes->adr_at(i));
}
}
void AOTArtifactFinder::check_critical_classes() {
if (CDSConfig::is_dumping_static_archive()) {
// vmClasses are stored in the AOT cache (or AOT config file, or static archive).
// If any of the vmClasses is excluded (usually due to an incompatible JVMTI agent),
// the resulting cache/config/archive is unusable.
for (auto id : EnumRange<vmClassID>{}) {
check_critical_class(vmClasses::klass_at(id));
}
}
}
void AOTArtifactFinder::check_critical_class(InstanceKlass* ik) {
if (SystemDictionaryShared::is_excluded_class(ik)) {
ResourceMark rm;
const char* msg = err_msg("Critical class %s has been excluded. %s cannot be written.",
ik->external_name(),
CDSConfig::type_of_archive_being_written());
AOTMetaspace::unrecoverable_writing_error(msg);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,12 +81,14 @@ class AOTArtifactFinder : AllStatic {
static void add_cached_type_array_class(TypeArrayKlass* tak);
static void add_cached_instance_class(InstanceKlass* ik);
static void append_to_all_cached_classes(Klass* k);
static void check_critical_class(InstanceKlass* ik);
public:
static void initialize();
static void find_artifacts();
static void add_cached_class(Klass* k);
static void add_aot_inited_class(InstanceKlass* ik);
static void all_cached_classes_do(MetaspaceClosure* it);
static void check_critical_classes();
static void dispose();
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -234,7 +234,8 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
}
void AOTClassInitializer::call_runtime_setup(JavaThread* current, InstanceKlass* ik) {
assert(ik->has_aot_initialized_mirror(), "sanity");
precond(ik->has_aot_initialized_mirror());
precond(!AOTLinkedClassBulkLoader::is_initializing_classes_early());
if (ik->is_runtime_setup_required()) {
if (log_is_enabled(Info, aot, init)) {
ResourceMark rm;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -116,11 +116,24 @@ void AOTLinkedClassBulkLoader::preload_classes_in_table(Array<InstanceKlass*>* c
}
}
#ifdef ASSERT
// true iff we are inside AOTLinkedClassBulkLoader::link_classes(), when
// we are moving classes into the fully_initialized state before the
// JVM is able to execute any bytecodes.
static bool _is_initializing_classes_early = false;
bool AOTLinkedClassBulkLoader::is_initializing_classes_early() {
return _is_initializing_classes_early;
}
#endif
// Some cached heap objects may hold references to methods in aot-linked
// classes (via MemberName). We need to make sure all classes are
// linked before executing any bytecode.
void AOTLinkedClassBulkLoader::link_classes(JavaThread* current) {
DEBUG_ONLY(_is_initializing_classes_early = true);
link_classes_impl(current);
DEBUG_ONLY(_is_initializing_classes_early = false);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
@ -135,6 +148,13 @@ void AOTLinkedClassBulkLoader::link_classes_impl(TRAPS) {
link_classes_in_table(table->boot2(), CHECK);
link_classes_in_table(table->platform(), CHECK);
link_classes_in_table(table->app(), CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot2(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->platform(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->app(), /*early_only=*/true, CHECK);
log_info(aot, init)("------ finished early class init");
}
void AOTLinkedClassBulkLoader::link_classes_in_table(Array<InstanceKlass*>* classes, TRAPS) {
@ -216,7 +236,7 @@ void AOTLinkedClassBulkLoader::validate_module(Klass* k, const char* category_na
#endif
void AOTLinkedClassBulkLoader::init_javabase_classes(JavaThread* current) {
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), current);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/false, current);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
@ -246,9 +266,9 @@ void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) {
assert(h_system_loader() != nullptr, "must be");
AOTLinkedClassTable* table = AOTLinkedClassTable::get();
init_classes_for_loader(Handle(), table->boot2(), CHECK);
init_classes_for_loader(h_platform_loader, table->platform(), CHECK);
init_classes_for_loader(h_system_loader, table->app(), CHECK);
init_classes_for_loader(Handle(), table->boot2(), /*early_only=*/false, CHECK);
init_classes_for_loader(h_platform_loader, table->platform(), /*early_only=*/false, CHECK);
init_classes_for_loader(h_system_loader, table->app(), /*early_only=*/false, CHECK);
if (Universe::is_fully_initialized() && VerifyDuringStartup) {
// Make sure we're still in a clean state.
@ -324,22 +344,80 @@ void AOTLinkedClassBulkLoader::initiate_loading(JavaThread* current, const char*
}
}
// Some AOT-linked classes for <class_loader> must be initialized early. This includes
// - classes that were AOT-initialized by AOTClassInitializer
// - the classes of all objects that are reachable from the archived mirrors of
// the AOT-linked classes for <class_loader>.
void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, TRAPS) {
// Can we move ik into fully_initialized state before the JVM is able to execute
// bytecodes?
static bool is_early_init_possible(InstanceKlass* ik) {
if (ik->is_runtime_setup_required()) {
// Bytecodes need to be executed in order to initialize this class.
if (log_is_enabled(Debug, aot, init)) {
ResourceMark rm;
log_debug(aot, init)("No early init %s: needs runtimeSetup()",
ik->external_name());
}
return false;
}
if (ik->super() != nullptr && !ik->super()->is_initialized()) {
// is_runtime_setup_required() == true for a super type
if (log_is_enabled(Debug, aot, init)) {
ResourceMark rm;
log_debug(aot, init)("No early init %s: super type %s not initialized",
ik->external_name(), ik->super()->external_name());
}
return false;
}
Array<InstanceKlass*>* interfaces = ik->local_interfaces();
int num_interfaces = interfaces->length();
for (int i = 0; i < num_interfaces; i++) {
InstanceKlass* intf = interfaces->at(i);
if (!intf->is_initialized() && intf->interface_needs_clinit_execution_as_super(/*also_check_supers*/false)) {
// is_runtime_setup_required() == true for this interface
if (log_is_enabled(Debug, aot, init)) {
ResourceMark rm;
log_debug(aot, init)("No early init %s: interface type %s not initialized",
ik->external_name(), intf->external_name());
}
return false;
}
}
return true;
}
// Normally, classes are initialized on demand. However, some AOT-linked classes
// for the class_loader must be proactively initialized, including:
// - Classes that have an AOT-initialized mirror (they were AOT-initialized by
// AOTClassInitializer during the assembly phase).
// - The classes of all objects that are reachable from the archived mirrors of
// the AOT-linked classes for the class_loader. These are recorded in the special
// subgraph.
//
// (early_only == true) means that this function is called before the JVM
// is capable of executing Java bytecodes.
void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes,
bool early_only, TRAPS) {
if (classes != nullptr) {
for (int i = 0; i < classes->length(); i++) {
InstanceKlass* ik = classes->at(i);
assert(ik->class_loader_data() != nullptr, "must be");
if (ik->has_aot_initialized_mirror()) {
ik->initialize_with_aot_initialized_mirror(CHECK);
bool do_init = ik->has_aot_initialized_mirror();
if (do_init && early_only && !is_early_init_possible(ik)) {
// ik will be proactively initialized later when init_classes_for_loader()
// is called again with (early_only == false).
do_init = false;
}
if (do_init) {
ik->initialize_with_aot_initialized_mirror(early_only, CHECK);
}
}
}
HeapShared::init_classes_for_special_subgraph(class_loader, CHECK);
if (!early_only) {
HeapShared::init_classes_for_special_subgraph(class_loader, CHECK);
}
}
void AOTLinkedClassBulkLoader::replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ class AOTLinkedClassBulkLoader : AllStatic {
static void link_classes_impl(TRAPS);
static void link_classes_in_table(Array<InstanceKlass*>* classes, TRAPS);
static void init_non_javabase_classes_impl(TRAPS);
static void init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, TRAPS);
static void init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, bool early_only, TRAPS);
static void replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) NOT_CDS_RETURN;
#ifdef ASSERT
@ -73,8 +73,9 @@ public:
static void init_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
static void init_non_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
static void exit_on_exception(JavaThread* current);
static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN;
static bool is_initializing_classes_early() NOT_DEBUG({return false;});
};
#endif // SHARE_CDS_AOTLINKEDCLASSBULKLOADER_HPP

View File

@ -589,7 +589,6 @@ public:
}
Klass* real_klass() {
assert(UseCompressedClassPointers, "heap archiving requires UseCompressedClassPointers");
return _data._klass;
}

View File

@ -54,7 +54,7 @@ public:
// Can this VM map archived heap region? Currently only G1+compressed{oops,cp}
static bool can_map() {
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedClassPointers);)
CDS_JAVA_HEAP_ONLY(return UseG1GC;)
NOT_CDS_JAVA_HEAP(return false;)
}

View File

@ -450,7 +450,6 @@ int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
}
HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
memset(mem, 0, fill_bytes);
@ -724,7 +723,6 @@ template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_add
}
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

View File

@ -250,9 +250,9 @@ static bool shared_base_too_high(char* specified_base, char* aligned_base, size_
static char* compute_shared_base(size_t cds_max) {
char* specified_base = (char*)SharedBaseAddress;
size_t alignment = AOTMetaspace::core_region_alignment();
if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) {
alignment = MAX2(alignment, Metaspace::reserve_alignment());
}
#if INCLUDE_CLASS_SPACE
alignment = MAX2(alignment, Metaspace::reserve_alignment());
#endif
if (SharedBaseAddress == 0) {
// Special meaning of -XX:SharedBaseAddress=0 -> Always map archive at os-selected address.
@ -1187,8 +1187,8 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::enable_dumping_aot_code();
{
builder.start_ac_region();
// Write the contents to AOT code region and close AOTCodeCache before packing the region
AOTCodeCache::close();
// Write the contents to AOT code region before packing the region
AOTCodeCache::dump();
builder.end_ac_region();
}
CDSConfig::disable_dumping_aot_code();
@ -1637,32 +1637,29 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap
aot_log_debug(aot)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
} else {
if (Metaspace::using_class_space()) {
prot_zone_size = protection_zone_size();
}
CLASS_SPACE_ONLY(prot_zone_size = protection_zone_size();)
#ifdef ASSERT
// Some sanity checks after reserving address spaces for archives
// and class space.
assert(archive_space_rs.is_reserved(), "Sanity");
if (Metaspace::using_class_space()) {
assert(archive_space_rs.base() == mapped_base_address &&
archive_space_rs.size() > protection_zone_size(),
"Archive space must lead and include the protection zone");
// Class space must closely follow the archive space. Both spaces
// must be aligned correctly.
assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
"A class space should have been reserved");
assert(class_space_rs.base() >= archive_space_rs.end(),
"class space should follow the cds archive space");
assert(is_aligned(archive_space_rs.base(),
core_region_alignment()),
"Archive space misaligned");
assert(is_aligned(class_space_rs.base(),
Metaspace::reserve_alignment()),
"class space misaligned");
}
#endif // ASSERT
#if INCLUDE_CLASS_SPACE
assert(archive_space_rs.base() == mapped_base_address &&
archive_space_rs.size() > protection_zone_size(),
"Archive space must lead and include the protection zone");
// Class space must closely follow the archive space. Both spaces
// must be aligned correctly.
assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
"A class space should have been reserved");
assert(class_space_rs.base() >= archive_space_rs.end(),
"class space should follow the cds archive space");
assert(is_aligned(archive_space_rs.base(),
core_region_alignment()),
"Archive space misaligned");
assert(is_aligned(class_space_rs.base(),
Metaspace::reserve_alignment()),
"class space misaligned");
#endif // INCLUDE_CLASS_SPACE
aot_log_info(aot)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s",
p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(),
@ -1764,67 +1761,60 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap
if (result == MAP_ARCHIVE_SUCCESS) {
SharedBaseAddress = (size_t)mapped_base_address;
#ifdef _LP64
if (Metaspace::using_class_space()) {
assert(prot_zone_size > 0 &&
*(mapped_base_address) == 'P' &&
*(mapped_base_address + prot_zone_size - 1) == 'P',
"Protection zone was overwritten?");
// Set up ccs in metaspace.
Metaspace::initialize_class_space(class_space_rs);
#if INCLUDE_CLASS_SPACE
assert(prot_zone_size > 0 &&
*(mapped_base_address) == 'P' &&
*(mapped_base_address + prot_zone_size - 1) == 'P',
"Protection zone was overwritten?");
// Set up ccs in metaspace.
Metaspace::initialize_class_space(class_space_rs);
// Set up compressed Klass pointer encoding: the encoding range must
// cover both archive and class space.
const address klass_range_start = (address)mapped_base_address;
const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
// The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
// - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
// - every archived Klass' prototype (only if +UseCompactObjectHeaders)
//
// In order for those IDs to still be valid, we need to dictate base and shift: base should be the
// mapping start (including protection zone), shift should be the shift used at archive generation time.
CompressedKlassPointers::initialize_for_given_encoding(
klass_range_start, klass_range_size,
klass_range_start, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
);
assert(CompressedKlassPointers::base() == klass_range_start, "must be");
} else {
// Let JVM freely choose encoding base and shift
CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
assert(CompressedKlassPointers::base() == nullptr ||
CompressedKlassPointers::base() == klass_range_start, "must be");
}
// Establish protection zone, but only if we need one
if (CompressedKlassPointers::base() == klass_range_start) {
CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size);
}
// Set up compressed Klass pointer encoding: the encoding range must
// cover both archive and class space.
const address klass_range_start = (address)mapped_base_address;
const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
// The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
// - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
// - every archived Klass' prototype (only if +UseCompactObjectHeaders)
//
// In order for those IDs to still be valid, we need to dictate base and shift: base should be the
// mapping start (including protection zone), shift should be the shift used at archive generation time.
CompressedKlassPointers::initialize_for_given_encoding(
klass_range_start, klass_range_size,
klass_range_start, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
);
assert(CompressedKlassPointers::base() == klass_range_start, "must be");
} else {
// Let JVM freely choose encoding base and shift
CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
assert(CompressedKlassPointers::base() == nullptr ||
CompressedKlassPointers::base() == klass_range_start, "must be");
}
// Establish protection zone, but only if we need one
if (CompressedKlassPointers::base() == klass_range_start) {
CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size);
}
if (static_mapinfo->can_use_heap_region()) {
if (static_mapinfo->object_streaming_mode()) {
HeapShared::initialize_loading_mode(HeapArchiveMode::_streaming);
} else {
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_or_load_heap_region();
HeapShared::initialize_loading_mode(HeapArchiveMode::_mapping);
}
if (static_mapinfo->can_use_heap_region()) {
if (static_mapinfo->object_streaming_mode()) {
HeapShared::initialize_loading_mode(HeapArchiveMode::_streaming);
} else {
FileMapRegion* r = static_mapinfo->region_at(AOTMetaspace::hp);
if (r->used() > 0) {
if (static_mapinfo->object_streaming_mode()) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data.");
} else {
if (!UseCompressedOops && !AOTMappedHeapLoader::can_map()) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. Selected GC not compatible -XX:-UseCompressedOops");
} else {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required.");
}
}
}
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_or_load_heap_region();
HeapShared::initialize_loading_mode(HeapArchiveMode::_mapping);
}
} else {
FileMapRegion* r = static_mapinfo->region_at(AOTMetaspace::hp);
if (r->used() > 0) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data.");
}
if (!CDSConfig::is_dumping_static_archive()) {
CDSConfig::stop_using_full_module_graph("No CDS heap data");
}
}
#endif // _LP64
#endif // INCLUDE_CLASS_SPACE
log_info(aot)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
log_info(aot)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");
} else {
@ -1857,8 +1847,13 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap
// (The gap may result from different alignment requirements between metaspace
// and CDS)
//
// If UseCompressedClassPointers is disabled, only one address space will be
// reserved:
// The range encompassing both spaces will be suitable to en/decode narrow Klass
// pointers: the base will be valid for encoding the range [Base, End) and not
// surpass the max. range for that encoding.
//
// On 32-bit, a "narrow" Klass is just the pointer itself, and the Klass encoding
// range encompasses the whole address range. Consequently, we can "decode" and
// "encode" any pointer anywhere, and so are free to place the CDS archive anywhere:
//
// +-- Base address End
// | |
@ -1872,27 +1867,21 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap
// use_archive_base_addr address is false, this base address is determined
// by the platform.
//
// If UseCompressedClassPointers=1, the range encompassing both spaces will be
// suitable to en/decode narrow Klass pointers: the base will be valid for
// encoding, the range [Base, End) and not surpass the max. range for that encoding.
//
// Return:
//
// - On success:
// - total_space_rs will be reserved as whole for archive_space_rs and
// class_space_rs if UseCompressedClassPointers is true.
// class_space_rs on 64-bit.
// On Windows, try reserve archive_space_rs and class_space_rs
// separately first if use_archive_base_addr is true.
// - archive_space_rs will be reserved and large enough to host static and
// if needed dynamic archive: [Base, A).
// archive_space_rs.base and size will be aligned to CDS reserve
// granularity.
// - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
// be reserved. Its start address will be aligned to metaspace reserve
// alignment, which may differ from CDS alignment. It will follow the cds
// archive space, close enough such that narrow class pointer encoding
// covers both spaces.
// If UseCompressedClassPointers=0, class_space_rs remains unreserved.
// - class_space_rs: On 64-bit, class_space_rs will be reserved. Its start
// address will be aligned to metaspace reserve alignment, which may differ
// from CDS alignment. It will follow the cds archive space, close enough
// such that narrow class pointer encoding covers both spaces.
// - On error: null is returned and the spaces remain unreserved.
char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
FileMapInfo* dynamic_mapinfo,
@ -1908,32 +1897,34 @@ char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapin
size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
if (!Metaspace::using_class_space()) {
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
#if !INCLUDE_CLASS_SPACE
// When running without class space, requested archive base should be aligned to cds core alignment.
assert(is_aligned(base_address, archive_space_alignment),
"Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.",
p2i(base_address), archive_space_alignment);
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size(),
mtNone);
if (archive_space_rs.is_reserved()) {
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
// Register archive space with NMT.
MemTracker::record_virtual_memory_tag(archive_space_rs, mtClassShared);
return archive_space_rs.base();
}
return nullptr;
// When running without class space, requested archive base should be aligned to cds core alignment.
assert(is_aligned(base_address, archive_space_alignment),
"Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.",
p2i(base_address), archive_space_alignment);
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size(),
mtNone);
if (archive_space_rs.is_reserved()) {
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
// Register archive space with NMT.
MemTracker::record_virtual_memory_tag(archive_space_rs, mtClassShared);
return archive_space_rs.base();
}
#ifdef _LP64
return nullptr;
#else
// INCLUDE_CLASS_SPACE=1
// Complex case: two spaces adjacent to each other, both to be addressable
// with narrow class pointers.
// We reserve the whole range spanning both spaces, then split that range up.
@ -2045,11 +2036,7 @@ char* AOTMetaspace::reserve_address_space_for_archives(FileMapInfo* static_mapin
return archive_space_rs.base();
#else
ShouldNotReachHere();
return nullptr;
#endif
#endif // INCLUDE_CLASS_SPACE
}
void AOTMetaspace::release_reserved_spaces(ReservedSpace& total_space_rs,

View File

@ -369,7 +369,6 @@ template <typename T> void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop ob
}
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
markWord mw = markWord::prototype();

View File

@ -1092,20 +1092,17 @@ class RelocateBufferToRequested : public BitMapClosure {
}
};
#ifdef _LP64
int ArchiveBuilder::precomputed_narrow_klass_shift() {
// Legacy Mode:
// We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
// Standard Mode:
// We use 32 bits for narrowKlass, which should cover a full 4G Klass range. Shift can be 0.
// CompactObjectHeader Mode:
// narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum
// Klass encoding range.
//
// Note that all of this may change in the future, if we decide to correct the pre-calculated
// narrow Klass IDs at archive load time.
assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
return UseCompactObjectHeaders ? CompressedKlassPointers::max_shift() : 0;
}
#endif // _LP64
void ArchiveBuilder::relocate_to_requested() {
if (!ro_region()->is_packed()) {

View File

@ -484,7 +484,6 @@ public:
void print_stats();
void report_out_of_space(const char* name, size_t needed_bytes);
#ifdef _LP64
// The CDS archive contains pre-computed narrow Klass IDs. It carries them in the headers of
// archived heap objects. With +UseCompactObjectHeaders, it also carries them in prototypes
// in Klass.
@ -504,7 +503,6 @@ public:
// TinyClassPointer Mode:
// We use the highest possible shift value to maximize the encoding range size.
static int precomputed_narrow_klass_shift();
#endif // _LP64
};

View File

@ -360,8 +360,8 @@ char* DumpRegion::allocate_metaspace_obj(size_t num_bytes, address src, Metaspac
bool is_instance_class = is_class && ((Klass*)src)->is_instance_klass();
#ifdef _LP64
// More strict alignments needed for UseCompressedClassPointers
if (is_class && UseCompressedClassPointers) {
// More strict alignments needed for Klass objects
if (is_class) {
size_t klass_alignment = checked_cast<size_t>(nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()));
alignment = MAX2(alignment, klass_alignment);
precond(is_aligned(alignment, SharedSpaceObjectAlignment));

View File

@ -108,6 +108,8 @@ void CDSConfig::ergo_initialize() {
}
AOTMapLogger::ergo_initialize();
setup_compiler_args();
}
const char* CDSConfig::default_archive_path() {
@ -635,8 +637,6 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla
FLAG_SET_ERGO_IF_DEFAULT(AOTClassLinking, true);
}
setup_compiler_args();
if (AOTClassLinking) {
// If AOTClassLinking is specified, enable all AOT optimizations by default.
FLAG_SET_ERGO_IF_DEFAULT(AOTInvokeDynamicLinking, true);
@ -891,10 +891,6 @@ const char* CDSConfig::type_of_archive_being_written() {
// If an incompatible VM options is found, return a text message that explains why
static const char* check_options_incompatible_with_dumping_heap() {
#if INCLUDE_CDS_JAVA_HEAP
if (!UseCompressedClassPointers) {
return "UseCompressedClassPointers must be true";
}
return nullptr;
#else
return "JVM not configured for writing Java heap objects";
@ -972,17 +968,27 @@ bool CDSConfig::is_loading_heap() {
}
bool CDSConfig::is_dumping_klass_subgraphs() {
if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
if (is_dumping_aot_linked_classes()) {
// KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
// has been superceded by AOT class linking. This feature is used only when
// AOT class linking is disabled.
//
// KlassSubGraphs are disabled in the preimage static archive, which contains a very
// limited set of oops.
return is_dumping_heap() && !is_dumping_aot_linked_classes();
} else {
return false;
}
if (is_dumping_preimage_static_archive()) {
// KlassSubGraphs are disabled in the preimage static archive, which contains a very
// limited set of oops.
return false;
}
if (!is_dumping_full_module_graph()) {
// KlassSubGraphs cannot be partially disabled. Since some of the KlassSubGraphs
// are used for (legacy support) of the archived full module graph, if
// is_dumping_full_module_graph() is calse, we must disable all KlassSubGraphs.
return false;
}
return is_dumping_heap();
}
bool CDSConfig::is_using_klass_subgraphs() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@ class Symbol;
class DumpTimeClassInfo: public CHeapObj<mtClass> {
bool _excluded;
bool _is_aot_tooling_class;
bool _is_early_klass;
bool _has_checked_exclusion;
class DTLoaderConstraint {
@ -143,7 +142,6 @@ public:
_clsfile_crc32 = -1;
_excluded = false;
_is_aot_tooling_class = false;
_is_early_klass = JvmtiExport::is_early_phase();
_verifier_constraints = nullptr;
_verifier_constraint_flags = nullptr;
_loader_constraints = nullptr;
@ -219,11 +217,6 @@ public:
_is_aot_tooling_class = true;
}
// Was this class loaded while JvmtiExport::is_early_phase()==true
bool is_early_klass() {
return _is_early_klass;
}
// simple accessors
void set_excluded() { _excluded = true; }
bool has_checked_exclusion() const { return _has_checked_exclusion; }

View File

@ -225,15 +225,9 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
}
#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
#ifdef _LP64
_narrow_klass_pointer_bits = CompressedKlassPointers::narrow_klass_pointer_bits();
_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
#endif
} else {
_narrow_klass_pointer_bits = _narrow_klass_shift = -1;
}
_narrow_klass_pointer_bits = CompressedKlassPointers::narrow_klass_pointer_bits();
_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
// Which JIT compier is used
_compiler_type = (u1)CompilerConfig::compiler_type();
_type_profile_level = TypeProfileLevel;
@ -295,7 +289,6 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- max_heap_size: %zu", _max_heap_size);
st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode);
st->print_cr("- compressed_oops: %d", _compressed_oops);
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
st->print_cr("- cloned_vtables: %u", cast_to_u4(_cloned_vtables));
@ -1535,10 +1528,34 @@ bool FileMapInfo::can_use_heap_region() {
if (!has_heap_region()) {
return false;
}
if (!object_streaming_mode() && !Universe::heap()->can_load_archived_objects() && !UseG1GC) {
// Incompatible object format
if (!object_streaming_mode() && !AOTMappedHeapLoader::can_use()) {
// Currently this happens only when using ZGC with an AOT cache generated with -XX:-AOTStreamableObjects
AOTMetaspace::report_loading_error("CDS heap data cannot be used by the selected GC. "
"Please choose a different GC or rebuild AOT cache "
"with -XX:+AOTStreamableObjects");
return false;
}
if (CDSConfig::is_using_aot_linked_classes()) {
assert(!JvmtiExport::should_post_class_file_load_hook(), "already checked");
assert(CDSConfig::is_using_full_module_graph(), "already checked");
} else {
if (JvmtiExport::should_post_class_file_load_hook()) {
AOTMetaspace::report_loading_error("CDS heap data is disabled because JVMTI ClassFileLoadHook is in use.");
return false;
}
if (!CDSConfig::is_using_full_module_graph()) {
if (CDSConfig::is_dumping_final_static_archive()) {
// We are loading the preimage static archive, which has no KlassSubGraphs.
// See CDSConfig::is_dumping_klass_subgraphs()
} else {
AOTMetaspace::report_loading_error("CDS heap data is disabled because archived full module graph is not used.");
return false;
}
}
}
if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) {
ShouldNotReachHere(); // CDS should have been disabled.
// The archived objects are mapped at JVM start-up, but we don't know if
@ -1902,11 +1919,12 @@ bool FileMapHeader::validate() {
_has_platform_or_app_classes = false;
}
aot_log_info(aot)("The %s was created with UseCompressedOops = %d, UseCompressedClassPointers = %d, UseCompactObjectHeaders = %d",
file_type, compressed_oops(), compressed_class_pointers(), compact_headers());
if (compressed_oops() != UseCompressedOops || compressed_class_pointers() != UseCompressedClassPointers) {
aot_log_warning(aot)("Unable to use %s.\nThe saved state of UseCompressedOops and UseCompressedClassPointers is "
"different from runtime, CDS will be disabled.", file_type);
aot_log_info(aot)("The %s was created with UseCompressedOops = %d, UseCompactObjectHeaders = %d",
file_type, compressed_oops(), compact_headers());
if (compressed_oops() != UseCompressedOops) {
aot_log_warning(aot)("Unable to use %s.\nThe saved state of UseCompressedOops (%d) is "
"different from runtime (%d), CDS will be disabled.", file_type,
compressed_oops(), UseCompressedOops);
return false;
}

View File

@ -120,7 +120,6 @@ private:
CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
bool _object_streaming_mode; // dump was created for object streaming
bool _compressed_oops; // save the flag UseCompressedOops
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
int _narrow_klass_pointer_bits; // save number of bits in narrowKlass
int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects
narrowPtr _cloned_vtables; // The address of the first cloned vtable
@ -200,7 +199,6 @@ public:
bool has_platform_or_app_classes() const { return _has_platform_or_app_classes; }
bool has_aot_linked_classes() const { return _has_aot_linked_classes; }
bool compressed_oops() const { return _compressed_oops; }
bool compressed_class_pointers() const { return _compressed_class_ptrs; }
int narrow_klass_pointer_bits() const { return _narrow_klass_pointer_bits; }
int narrow_klass_shift() const { return _narrow_klass_shift; }
bool has_full_module_graph() const { return _has_full_module_graph; }

View File

@ -131,17 +131,14 @@ static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
{"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
{"jdk/internal/math/FDBigInteger", "archivedCaches"},
#ifndef PRODUCT
{nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
{nullptr, nullptr},
};
// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
// full module graph support
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
{ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
{"java/lang/Module$ArchivedData", "archivedData"},
#ifndef PRODUCT
{nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
{nullptr, nullptr},
};
@ -164,8 +161,7 @@ bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik);
} else {
return false;
}
@ -568,6 +564,7 @@ bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgra
return false;
}
AOTArtifactFinder::add_cached_class(obj->klass());
AOTOopChecker::check(obj); // Make sure contents of this oop are safe.
count_allocation(obj->size());
@ -925,7 +922,7 @@ void HeapShared::start_scanning_for_oops() {
// The special subgraph doesn't belong to any class. We use Object_klass() here just
// for convenience.
_dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
_dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass());
// Cache for recording where the archived objects are copied to
create_archived_object_cache();
@ -1003,12 +1000,7 @@ void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
true /* is_full_module_graph */);
}
archive_object_subgraphs(archive_subgraph_entry_fields);
}
}
@ -1021,12 +1013,11 @@ HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_
// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
bool created;
KlassSubGraphInfo* info =
_dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k, is_full_module_graph),
&created);
_dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k), &created);
assert(created, "must not initialize twice");
return info;
}
@ -1114,7 +1105,6 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
}
_subgraph_object_klasses->append_if_missing(orig_k);
_has_non_early_klasses |= is_non_early_klass(orig_k);
}
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
@ -1157,45 +1147,11 @@ void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
AOTMetaspace::unrecoverable_writing_error();
}
bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
if (k->is_objArray_klass()) {
k = ObjArrayKlass::cast(k)->bottom_klass();
}
if (k->is_instance_klass()) {
if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
ResourceMark rm;
log_info(aot, heap)("non-early: %s", k->external_name());
return true;
} else {
return false;
}
} else {
return false;
}
}
// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
_k = ArchiveBuilder::get_buffered_klass(info->klass());
_entry_field_records = nullptr;
_subgraph_object_klasses = nullptr;
_is_full_module_graph = info->is_full_module_graph();
if (_is_full_module_graph) {
// Consider all classes referenced by the full module graph as early -- we will be
// allocating objects of these classes during JVMTI early phase, so they cannot
// be processed by (non-early) JVMTI ClassFileLoadHook
_has_non_early_klasses = false;
} else {
_has_non_early_klasses = info->has_non_early_klasses();
}
if (_has_non_early_klasses) {
ResourceMark rm;
log_info(aot, heap)(
"Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
_k->external_name());
}
// populate the entry fields
GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
@ -1353,15 +1309,10 @@ static void verify_the_heap(Klass* k, const char* which) {
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
}
@ -1509,24 +1460,6 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
}
return nullptr;
} else {
if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) {
if (log_is_enabled(Info, aot, heap)) {
ResourceMark rm(THREAD);
log_info(aot, heap)("subgraph %s cannot be used because full module graph is disabled",
k->external_name());
}
return nullptr;
}
if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
if (log_is_enabled(Info, aot, heap)) {
ResourceMark rm(THREAD);
log_info(aot, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
k->external_name());
}
return nullptr;
}
if (log_is_enabled(Info, aot, heap)) {
ResourceMark rm;
log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
@ -1608,8 +1541,8 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
// mirror after this point.
if (log_is_enabled(Info, aot, heap)) {
ResourceMark rm;
log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
k->external_name(), p2i(k),
k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
}
}
@ -2075,9 +2008,9 @@ void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
++ _num_new_walked_objs;
}
void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name) {
log_info(aot, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
init_subgraph_info(k, is_full_module_graph);
init_subgraph_info(k);
init_seen_objects_table();
_num_new_walked_objs = 0;
_num_new_archived_objs = 0;
@ -2209,9 +2142,6 @@ void HeapShared::init_subgraph_entry_fields(TRAPS) {
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
}
}
}
@ -2310,8 +2240,7 @@ void HeapShared::init_heap_writer() {
}
}
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_full_module_graph) {
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[]) {
_num_total_subgraph_recordings = 0;
_num_total_walked_objs = 0;
_num_total_archived_objs = 0;
@ -2327,7 +2256,7 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
for (int i = 0; fields[i].valid(); ) {
ArchivableStaticFieldInfo* info = &fields[i];
const char* klass_name = info->klass_name;
start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
start_recording_subgraph(info->klass, klass_name);
// If you have specified consecutive fields of the same klass in
// fields[], these will be archived in the same

View File

@ -66,21 +66,12 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
// For each entry field, it is a tuple of field_offset, field_value
GrowableArray<int>* _subgraph_entry_fields;
// Does this KlassSubGraphInfo belong to the archived full module graph
bool _is_full_module_graph;
// Does this KlassSubGraphInfo references any classes that were loaded while
// JvmtiExport::is_early_phase()!=true. If so, this KlassSubGraphInfo cannot be
// used at runtime if JVMTI ClassFileLoadHook is enabled.
bool _has_non_early_klasses;
static bool is_non_early_klass(Klass* k);
static void check_allowed_klass(InstanceKlass* ik);
public:
KlassSubGraphInfo(Klass* k, bool is_full_module_graph) :
KlassSubGraphInfo(Klass* k) :
_k(k), _subgraph_object_klasses(nullptr),
_subgraph_entry_fields(nullptr),
_is_full_module_graph(is_full_module_graph),
_has_non_early_klasses(false) {}
_subgraph_entry_fields(nullptr) {}
~KlassSubGraphInfo() {
if (_subgraph_object_klasses != nullptr) {
@ -104,8 +95,6 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
return _subgraph_object_klasses == nullptr ? 0 :
_subgraph_object_klasses->length();
}
bool is_full_module_graph() const { return _is_full_module_graph; }
bool has_non_early_klasses() const { return _has_non_early_klasses; }
};
// An archived record of object sub-graphs reachable from static
@ -114,7 +103,6 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
class ArchivedKlassSubGraphInfoRecord {
private:
Klass* _k;
bool _is_full_module_graph;
bool _has_non_early_klasses;
// contains pairs of field offset and value for each subgraph entry field
@ -130,7 +118,6 @@ class ArchivedKlassSubGraphInfoRecord {
Klass* klass() const { return _k; }
Array<int>* entry_field_records() const { return _entry_field_records; }
Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; }
bool is_full_module_graph() const { return _is_full_module_graph; }
bool has_non_early_klasses() const { return _has_non_early_klasses; }
};
#endif // INCLUDE_CDS_JAVA_HEAP
@ -270,8 +257,7 @@ private:
static CachedOopInfo make_cached_oop_info(oop obj, oop referrer);
static ArchivedKlassSubGraphInfoRecord* archive_subgraph_info(KlassSubGraphInfo* info);
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_full_module_graph);
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[]);
// Archive object sub-graph starting from the given static field
// in Klass k's mirror.
@ -285,7 +271,7 @@ private:
static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN;
static void check_special_subgraph_classes();
static KlassSubGraphInfo* init_subgraph_info(Klass *k, bool is_full_module_graph);
static KlassSubGraphInfo* init_subgraph_info(Klass *k);
static KlassSubGraphInfo* get_subgraph_info(Klass *k);
static void init_subgraph_entry_fields(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
@ -340,8 +326,7 @@ private:
static size_t _num_total_recorded_klasses;
static size_t _num_total_verifications;
static void start_recording_subgraph(InstanceKlass *k, const char* klass_name,
bool is_full_module_graph);
static void start_recording_subgraph(InstanceKlass *k, const char* klass_name);
static void done_recording_subgraph(InstanceKlass *k, const char* klass_name);
static bool has_been_seen_during_subgraph_recording(oop obj);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -224,6 +224,7 @@ void ClassPrinter::print_flags_help(outputStream* os) {
os->print_cr(" 0x%02x - print info for invokehandle", PRINT_METHOD_HANDLE);
os->print_cr(" 0x%02x - print details of the C++ and Java objects that represent classes", PRINT_CLASS_DETAILS);
os->print_cr(" 0x%02x - print details of the C++ objects that represent methods", PRINT_METHOD_DETAILS);
os->print_cr(" 0x%02x - print MethodData", PRINT_METHOD_DATA);
os->cr();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,7 @@ public:
PRINT_METHOD_HANDLE = 1 << 4, // extra information for invokehandle
PRINT_CLASS_DETAILS = 1 << 5, // print details of the C++ and Java objects that represent classes
PRINT_METHOD_DETAILS = 1 << 6, // print details of the C++ objects that represent methods
PRINT_METHOD_DATA = 1 << 7, // print MethodData - requires MDO lock
};
static bool has_mode(int flags, Mode mode) {
return (flags & static_cast<int>(mode)) != 0;

View File

@ -89,11 +89,9 @@ DEBUG_ONLY(bool SystemDictionaryShared::_class_loading_may_happen = true;)
#ifdef ASSERT
static void check_klass_after_loading(const Klass* k) {
#ifdef _LP64
if (k != nullptr && UseCompressedClassPointers) {
if (k != nullptr) {
CompressedKlassPointers::check_encodable(k);
}
#endif
}
#endif
@ -373,11 +371,6 @@ bool SystemDictionaryShared::is_jfr_event_class(InstanceKlass *k) {
return false;
}
bool SystemDictionaryShared::is_early_klass(InstanceKlass* ik) {
DumpTimeClassInfo* info = _dumptime_table->get(ik);
return (info != nullptr) ? info->is_early_klass() : false;
}
bool SystemDictionaryShared::check_self_exclusion(InstanceKlass* k) {
bool log_warning = false;
const char* error = check_self_exclusion_helper(k, log_warning);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -199,7 +199,6 @@ private:
static void iterate_verification_constraint_names(InstanceKlass* k, DumpTimeClassInfo* info, Function func);
public:
static bool is_early_klass(InstanceKlass* k); // Was k loaded while JvmtiExport::is_early_phase()==true
static bool has_archived_enum_objs(InstanceKlass* ik);
static void set_has_archived_enum_objs(InstanceKlass* ik);

View File

@ -284,11 +284,11 @@ bool AOTCodeCache::open_cache(bool is_dumping, bool is_using) {
return true;
}
void AOTCodeCache::close() {
void AOTCodeCache::dump() {
if (is_on()) {
delete _cache; // Free memory
_cache = nullptr;
opened_cache = nullptr;
assert(is_on_for_dump(), "should be called only when dumping AOT code");
MutexLocker ml(Compile_lock);
_cache->finish_write();
}
}
@ -304,7 +304,6 @@ AOTCodeCache::AOTCodeCache(bool is_dumping, bool is_using) :
_store_size(0),
_for_use(is_using),
_for_dump(is_dumping),
_closing(false),
_failed(false),
_lookup_failed(false),
_table(nullptr),
@ -381,30 +380,6 @@ void AOTCodeCache::init_early_c1_table() {
}
}
AOTCodeCache::~AOTCodeCache() {
if (_closing) {
return; // Already closed
}
// Stop any further access to cache.
_closing = true;
MutexLocker ml(Compile_lock);
if (for_dump()) { // Finalize cache
finish_write();
}
_load_buffer = nullptr;
if (_C_store_buffer != nullptr) {
FREE_C_HEAP_ARRAY(char, _C_store_buffer);
_C_store_buffer = nullptr;
_store_buffer = nullptr;
}
if (_table != nullptr) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
delete _table;
_table = nullptr;
}
}
void AOTCodeCache::Config::record(uint cpu_features_offset) {
_flags = 0;
#ifdef ASSERT
@ -413,9 +388,6 @@ void AOTCodeCache::Config::record(uint cpu_features_offset) {
if (UseCompressedOops) {
_flags |= compressedOops;
}
if (UseCompressedClassPointers) {
_flags |= compressedClassPointers;
}
if (UseTLAB) {
_flags |= useTLAB;
}
@ -499,10 +471,6 @@ bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
return false;
}
if (((_flags & compressedClassPointers) != 0) != UseCompressedClassPointers) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedClassPointers = %s", UseCompressedClassPointers ? "false" : "true");
return false;
}
if (_compressedKlassShift != (uint)CompressedKlassPointers::shift()) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CompressedKlassPointers::shift() = %d vs current %d", _compressedKlassShift, CompressedKlassPointers::shift());
return false;
@ -571,6 +539,9 @@ AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
_load_buffer = cache->cache_buffer();
_read_position = 0;
_lookup_failed = false;
_name = nullptr;
_reloc_data = nullptr;
_oop_maps = nullptr;
}
void AOTCodeReader::set_read_position(uint pos) {
@ -935,16 +906,6 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
has_oop_maps = true;
}
#ifndef PRODUCT
// Write asm remarks
if (!cache->write_asm_remarks(blob)) {
return false;
}
if (!cache->write_dbg_strings(blob)) {
return false;
}
#endif /* PRODUCT */
if (!cache->write_relocations(blob)) {
if (!cache->failed()) {
// We may miss an address in AOT table - skip this code blob.
@ -953,6 +914,16 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
return false;
}
#ifndef PRODUCT
// Write asm remarks after relocation info
if (!cache->write_asm_remarks(blob)) {
return false;
}
if (!cache->write_dbg_strings(blob)) {
return false;
}
#endif /* PRODUCT */
uint entry_size = cache->_write_position - entry_position;
AOTCodeEntry* entry = new(cache) AOTCodeEntry(entry_kind, encode_id(entry_kind, id),
entry_position, entry_size, name_offset, name_size,
@ -1014,39 +985,28 @@ CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
set_lookup_failed(); // Skip this blob
return nullptr;
}
_name = stored_name;
// Read archived code blob
uint offset = entry_position + _entry->blob_offset();
CodeBlob* archived_blob = (CodeBlob*)addr(offset);
offset += archived_blob->size();
address reloc_data = (address)addr(offset);
_reloc_data = (address)addr(offset);
offset += archived_blob->relocation_size();
set_read_position(offset);
ImmutableOopMapSet* oop_maps = nullptr;
if (_entry->has_oop_maps()) {
oop_maps = read_oop_map_set();
_oop_maps = read_oop_map_set();
}
CodeBlob* code_blob = CodeBlob::create(archived_blob,
stored_name,
reloc_data,
oop_maps
);
// CodeBlob::restore() calls AOTCodeReader::restore()
CodeBlob* code_blob = CodeBlob::create(archived_blob, this);
if (code_blob == nullptr) { // no space left in CodeCache
return nullptr;
}
#ifndef PRODUCT
code_blob->asm_remarks().init();
read_asm_remarks(code_blob->asm_remarks());
code_blob->dbg_strings().init();
read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
fix_relocations(code_blob);
#ifdef ASSERT
LogStreamHandle(Trace, aot, codecache, stubs) log;
if (log.is_enabled()) {
@ -1057,6 +1017,25 @@ CodeBlob* AOTCodeReader::compile_code_blob(const char* name) {
return code_blob;
}
void AOTCodeReader::restore(CodeBlob* code_blob) {
precond(AOTCodeCache::is_on_for_use());
precond(_name != nullptr);
precond(_reloc_data != nullptr);
code_blob->set_name(_name);
code_blob->restore_mutable_data(_reloc_data);
code_blob->set_oop_maps(_oop_maps);
fix_relocations(code_blob);
#ifndef PRODUCT
code_blob->asm_remarks().init();
read_asm_remarks(code_blob->asm_remarks());
code_blob->dbg_strings().init();
read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
}
// ------------ process code and data --------------
// Can't use -1. It is valid value for jump to iteself destination
@ -1545,18 +1524,6 @@ void AOTCodeAddressTable::init_early_c1() {
#undef SET_ADDRESS
AOTCodeAddressTable::~AOTCodeAddressTable() {
if (_extrs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _extrs_addr);
}
if (_stubs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _stubs_addr);
}
if (_shared_blobs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
}
}
#ifdef PRODUCT
#define MAX_STR_COUNT 200
#else

View File

@ -152,7 +152,6 @@ public:
_early_c1_complete(false),
_complete(false)
{ }
~AOTCodeAddressTable();
void init_extrs();
void init_early_stubs();
void init_shared_blobs();
@ -178,12 +177,11 @@ protected:
none = 0,
debugVM = 1,
compressedOops = 2,
compressedClassPointers = 4,
useTLAB = 8,
systemClassAssertions = 16,
userClassAssertions = 32,
enableContendedPadding = 64,
restrictContendedPadding = 128
useTLAB = 4,
systemClassAssertions = 8,
userClassAssertions = 16,
enableContendedPadding = 32,
restrictContendedPadding = 64
};
uint _flags;
uint _cpu_features_offset; // offset in the cache where cpu features are stored
@ -260,7 +258,6 @@ private:
uint _store_size; // Used when writing cache
bool _for_use; // AOT cache is open for using AOT code
bool _for_dump; // AOT cache is open for dumping AOT code
bool _closing; // Closing cache file
bool _failed; // Failed read/write to/from cache (cache is broken?)
bool _lookup_failed; // Failed to lookup for info (skip only this code load)
@ -290,7 +287,6 @@ private:
public:
AOTCodeCache(bool is_dumping, bool is_using);
~AOTCodeCache();
const char* cache_buffer() const { return _load_buffer; }
bool failed() const { return _failed; }
@ -314,8 +310,6 @@ public:
bool for_use() const { return _for_use && !_failed; }
bool for_dump() const { return _for_dump && !_failed; }
bool closing() const { return _closing; }
AOTCodeEntry* add_entry() {
_store_entries_cnt++;
_store_entries -= 1;
@ -375,8 +369,8 @@ public:
static AOTCodeCache* cache() { assert(_passed_init2, "Too early to ask"); return _cache; }
static void initialize() NOT_CDS_RETURN;
static void init2() NOT_CDS_RETURN;
static void close() NOT_CDS_RETURN;
static bool is_on() CDS_ONLY({ return cache() != nullptr && !_cache->closing(); }) NOT_CDS_RETURN_(false);
static void dump() NOT_CDS_RETURN;
static bool is_on() CDS_ONLY({ return cache() != nullptr; }) NOT_CDS_RETURN_(false);
static bool is_on_for_use() CDS_ONLY({ return is_on() && _cache->for_use(); }) NOT_CDS_RETURN_(false);
static bool is_on_for_dump() CDS_ONLY({ return is_on() && _cache->for_dump(); }) NOT_CDS_RETURN_(false);
static bool is_dumping_stub() NOT_CDS_RETURN_(false);
@ -408,11 +402,13 @@ private:
void clear_lookup_failed() { _lookup_failed = false; }
bool lookup_failed() const { return _lookup_failed; }
AOTCodeEntry* aot_code_entry() { return (AOTCodeEntry*)_entry; }
public:
AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry);
// Values used by restore(code_blob).
// They should be set before calling it.
const char* _name;
address _reloc_data;
ImmutableOopMapSet* _oop_maps;
CodeBlob* compile_code_blob(const char* name);
AOTCodeEntry* aot_code_entry() { return (AOTCodeEntry*)_entry; }
ImmutableOopMapSet* read_oop_map_set();
@ -421,6 +417,13 @@ public:
void read_asm_remarks(AsmRemarks& asm_remarks);
void read_dbg_strings(DbgStrings& dbg_strings);
#endif // PRODUCT
public:
AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry);
CodeBlob* compile_code_blob(const char* name);
void restore(CodeBlob* code_blob);
};
// code cache internal runtime constants area used by AOT code

View File

@ -22,6 +22,7 @@
*
*/
#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
@ -188,22 +189,6 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
assert(_mutable_data == blob_end(), "sanity");
}
void CodeBlob::restore_mutable_data(address reloc_data) {
// Relocation data is now stored as part of the mutable data area; allocate it before copy relocations
if (_mutable_data_size > 0) {
_mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
}
} else {
_mutable_data = blob_end(); // default value
}
if (_relocation_size > 0) {
assert(_mutable_data_size > 0, "relocation is part of mutable data section");
memcpy((address)relocation_begin(), reloc_data, relocation_size());
}
}
void CodeBlob::purge() {
assert(_mutable_data != nullptr, "should never be null");
if (_mutable_data != blob_end()) {
@ -240,6 +225,23 @@ void CodeBlob::print_code_on(outputStream* st) {
Disassembler::decode(this, st);
}
#if INCLUDE_CDS
void CodeBlob::restore_mutable_data(address reloc_data) {
// Relocation data is now stored as part of the mutable data area; allocate it before copy relocations
if (_mutable_data_size > 0) {
_mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
}
} else {
_mutable_data = blob_end(); // default value
}
if (_relocation_size > 0) {
assert(_mutable_data_size > 0, "relocation is part of mutable data section");
memcpy((address)relocation_begin(), reloc_data, relocation_size());
}
}
void CodeBlob::prepare_for_archiving_impl() {
set_name(nullptr);
_oop_maps = nullptr;
@ -269,24 +271,15 @@ void CodeBlob::post_restore() {
vptr(_kind)->post_restore(this);
}
CodeBlob* CodeBlob::restore(address code_cache_buffer,
const char* name,
address archived_reloc_data,
ImmutableOopMapSet* archived_oop_maps)
CodeBlob* CodeBlob::restore(address code_cache_buffer, AOTCodeReader* reader)
{
copy_to(code_cache_buffer);
CodeBlob* code_blob = (CodeBlob*)code_cache_buffer;
code_blob->set_name(name);
code_blob->restore_mutable_data(archived_reloc_data);
code_blob->set_oop_maps(archived_oop_maps);
reader->restore(code_blob);
return code_blob;
}
CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
const char* name,
address archived_reloc_data,
ImmutableOopMapSet* archived_oop_maps
)
CodeBlob* CodeBlob::create(CodeBlob* archived_blob, AOTCodeReader* reader)
{
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
@ -298,10 +291,7 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
address code_cache_buffer = (address)CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (code_cache_buffer != nullptr) {
blob = archived_blob->restore(code_cache_buffer,
name,
archived_reloc_data,
archived_oop_maps);
blob = archived_blob->restore(code_cache_buffer, reader);
assert(blob != nullptr, "sanity check");
// Flush the code block
@ -315,6 +305,8 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
return blob;
}
#endif // INCLUDE_CDS
//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info.

View File

@ -34,6 +34,7 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
class AOTCodeReader;
class ImmutableOopMap;
class ImmutableOopMapSet;
class JNIHandleBlock;
@ -107,9 +108,6 @@ class CodeBlob {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
void restore_mutable_data(address reloc_data);
protected:
// order fields from large to small to minimize padding between fields
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
@ -169,8 +167,8 @@ protected:
void operator delete(void* p) { }
void prepare_for_archiving_impl();
void post_restore_impl();
void prepare_for_archiving_impl() NOT_CDS_RETURN;
void post_restore_impl() NOT_CDS_RETURN;
public:
@ -304,6 +302,9 @@ public:
void use_strings(DbgStrings &strings) { _dbg_strings.share(strings); }
#endif
#if INCLUDE_CDS
void restore_mutable_data(address reloc_data);
void copy_to(address buffer) {
memcpy(buffer, this, this->size());
}
@ -314,11 +315,9 @@ public:
// methods to restore a blob from AOT code cache into the CodeCache
void post_restore();
CodeBlob* restore(address code_cache_buffer, const char* name, address archived_reloc_data, ImmutableOopMapSet* archived_oop_maps);
static CodeBlob* create(CodeBlob* archived_blob,
const char* name,
address archived_reloc_data,
ImmutableOopMapSet* archived_oop_maps);
CodeBlob* restore(address code_cache_buffer, AOTCodeReader* reader);
static CodeBlob* create(CodeBlob* archived_blob, AOTCodeReader* reader);
#endif
};
//----------------------------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -76,11 +76,7 @@ CompiledICData::CompiledICData()
// Inline cache callsite info is initialized once the first time it is resolved
void CompiledICData::initialize(CallInfo* call_info, Klass* receiver_klass) {
_speculated_method = call_info->selected_method();
if (UseCompressedClassPointers) {
_speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass);
} else {
_speculated_klass = (uintptr_t)receiver_klass;
}
_speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass);
if (call_info->call_kind() == CallInfo::itable_call) {
assert(call_info->resolved_method() != nullptr, "virtual or interface method must be found");
_itable_defc_klass = call_info->resolved_method()->method_holder();
@ -133,12 +129,7 @@ Klass* CompiledICData::speculated_klass() const {
if (is_speculated_klass_unloaded()) {
return nullptr;
}
if (UseCompressedClassPointers) {
return CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass);
} else {
return (Klass*)_speculated_klass;
}
return CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass);
}
//-----------------------------------------------------------------------------

View File

@ -1306,9 +1306,7 @@ nmethod::nmethod(
_deopt_handler_entry_offset = 0;
_unwind_handler_offset = 0;
CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
uint16_t metadata_size;
CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
int metadata_size = align_up(code_buffer->total_metadata_size(), wordSize);
JVMCI_ONLY( _metadata_size = metadata_size; )
assert(_mutable_data_size == _relocation_size + metadata_size,
"wrong mutable data size: %d != %d + %d",
@ -1446,7 +1444,6 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
_deopt_handler_entry_offset = nm._deopt_handler_entry_offset;
_unwind_handler_offset = nm._unwind_handler_offset;
_num_stack_arg_slots = nm._num_stack_arg_slots;
_oops_size = nm._oops_size;
#if INCLUDE_JVMCI
_metadata_size = nm._metadata_size;
#endif
@ -1749,9 +1746,7 @@ nmethod::nmethod(
_unwind_handler_offset = -1;
}
CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
uint16_t metadata_size;
CHECKED_CAST(metadata_size, uint16_t, align_up(code_buffer->total_metadata_size(), wordSize));
int metadata_size = align_up(code_buffer->total_metadata_size(), wordSize);
JVMCI_ONLY( _metadata_size = metadata_size; )
int jvmci_data_size = 0 JVMCI_ONLY( + align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize));
assert(_mutable_data_size == _relocation_size + metadata_size + jvmci_data_size,

View File

@ -235,11 +235,10 @@ class nmethod : public CodeBlob {
// Number of arguments passed on the stack
uint16_t _num_stack_arg_slots;
uint16_t _oops_size;
#if INCLUDE_JVMCI
// _metadata_size is not specific to JVMCI. In the non-JVMCI case, it can be derived as:
// _metadata_size = mutable_data_size - relocation_size
uint16_t _metadata_size;
int _metadata_size;
#endif
// Offset in immutable data section

View File

@ -185,46 +185,6 @@ intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
}
}
void CompilerConfig::set_client_emulation_mode_flags() {
assert(has_c1(), "Must have C1 compiler present");
CompilationModeFlag::set_quick_only();
FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI
FLAG_SET_ERGO(EnableJVMCI, false);
FLAG_SET_ERGO(UseJVMCICompiler, false);
#endif
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
}
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
}
if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
}
if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
}
if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
}
if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
}
if (FLAG_IS_DEFAULT(CICompilerCount)) {
FLAG_SET_ERGO(CICompilerCount, 1);
}
}
bool CompilerConfig::is_compilation_mode_selected() {
return !FLAG_IS_DEFAULT(TieredCompilation) ||
!FLAG_IS_DEFAULT(TieredStopAtLevel) ||
!FLAG_IS_DEFAULT(CompilationMode)
JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
|| !FLAG_IS_DEFAULT(UseJVMCICompiler));
}
static bool check_legacy_flags() {
JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold));
if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
@ -540,29 +500,11 @@ bool CompilerConfig::check_args_consistency(bool status) {
return status;
}
bool CompilerConfig::should_set_client_emulation_mode_flags() {
#if !COMPILER1_OR_COMPILER2
return false;
#endif
return has_c1() &&
is_compilation_mode_selected() &&
!has_c2() &&
!is_jvmci_compiler();
}
void CompilerConfig::ergo_initialize() {
#if !COMPILER1_OR_COMPILER2
return;
#endif
// This property is also checked when selecting the heap size. Since client
// emulation mode influences Java heap memory usage, part of the logic must
// occur before choosing the heap size.
if (should_set_client_emulation_mode_flags()) {
set_client_emulation_mode_flags();
}
set_legacy_emulation_flags();
set_compilation_policy_flags();
@ -581,9 +523,6 @@ void CompilerConfig::ergo_initialize() {
}
if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) {
if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
warning("ProfileInterpreter disabled due to client emulation mode");
}
FLAG_SET_CMDLINE(ProfileInterpreter, false);
}

View File

@ -151,14 +151,10 @@ public:
inline static CompilerType compiler_type();
static bool should_set_client_emulation_mode_flags();
private:
static bool is_compilation_mode_selected();
static void set_compilation_policy_flags();
static void set_jvmci_specific_flags();
static void set_legacy_emulation_flags();
static void set_client_emulation_mode_flags();
};
#endif // SHARE_COMPILER_COMPILERDEFINITIONS_HPP

Some files were not shown because too many files have changed in this diff Show More