Merge branch 'master' into fix-c2-convi2l-8356184

This commit is contained in:
katkerem 2026-02-17 18:00:12 +00:00
commit 9890e3fa52
474 changed files with 20600 additions and 12028 deletions

View File

@ -965,9 +965,8 @@ rather than <code>NULL</code>. See the paper for reasons to avoid
<code>NULL</code>.</p>
<p>Don't use (constant expression or literal) 0 for pointers. Note that
C++14 removed non-literal 0 constants from <em>null pointer
constants</em>, though some compilers continue to treat them as such.
For historical reasons there may be lingering uses of 0 as a
pointer.</p>
constants</em>, though some compilers continue to treat them as
such.</p>
<h3 id="atomic">&lt;atomic&gt;</h3>
<p>Do not use facilities provided by the <code>&lt;atomic&gt;</code>
header (<a

View File

@ -884,8 +884,7 @@ rather than `NULL`. See the paper for reasons to avoid `NULL`.
Don't use (constant expression or literal) 0 for pointers. Note that C++14
removed non-literal 0 constants from _null pointer constants_, though some
compilers continue to treat them as such. For historical reasons there may be
lingering uses of 0 as a pointer.
compilers continue to treat them as such.
### &lt;atomic&gt;

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -209,7 +209,10 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-wd"
CFLAGS_WARNINGS_ARE_ERRORS="-WX"
WARNINGS_ENABLE_ALL="-W3"
WARNINGS_ENABLE_ALL_NORMAL="-W3"
WARNINGS_ENABLE_ADDITIONAL=""
WARNINGS_ENABLE_ADDITIONAL_CXX=""
WARNINGS_ENABLE_ADDITIONAL_JVM=""
DISABLED_WARNINGS="4800 5105"
;;
@ -218,14 +221,16 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-Wno-"
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
WARNINGS_ENABLE_ALL_NORMAL="-Wall -Wextra"
# Additional warnings that are not activated by -Wall and -Wextra
WARNINGS_ENABLE_ADDITIONAL="-Winvalid-pch -Wpointer-arith -Wreturn-type \
WARNINGS_ENABLE_ADDITIONAL="-Wformat=2 \
-Winvalid-pch -Wpointer-arith -Wreturn-type \
-Wsign-compare -Wtrampolines -Wtype-limits -Wundef -Wuninitialized \
-Wunused-const-variable=1 -Wunused-function -Wunused-result \
-Wunused-value"
WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
WARNINGS_ENABLE_ADDITIONAL_JVM="-Wzero-as-null-pointer-constant"
# These warnings will never be turned on, since they generate too many
# false positives.
@ -241,16 +246,24 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-Wno-"
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
WARNINGS_ENABLE_ALL_NORMAL="-Wall -Wextra"
# Additional warnings that are not activated by -Wall and -Wextra
WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wsign-compare -Wreorder \
WARNINGS_ENABLE_ADDITIONAL="-Wformat=2 \
-Wpointer-arith -Wsign-compare -Wreorder \
-Wunused-function -Wundef -Wunused-value -Woverloaded-virtual"
WARNINGS_ENABLE_ALL="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
WARNINGS_ENABLE_ADDITIONAL_CXX=""
WARNINGS_ENABLE_ADDITIONAL_JVM="-Wzero-as-null-pointer-constant"
# These warnings will never be turned on, since they generate too many
# false positives.
DISABLED_WARNINGS="unknown-warning-option unused-parameter"
;;
esac
WARNINGS_ENABLE_ALL="$WARNINGS_ENABLE_ALL_NORMAL $WARNINGS_ENABLE_ADDITIONAL"
WARNINGS_ENABLE_ALL_CXX="$WARNINGS_ENABLE_ALL $WARNINGS_ENABLE_ADDITIONAL_CXX"
WARNINGS_ENABLE_ALL_JVM="$WARNINGS_ENABLE_ALL_CXX $WARNINGS_ENABLE_ADDITIONAL_JVM"
AC_SUBST(DISABLE_WARNING_PREFIX)
AC_SUBST(BUILD_CC_DISABLE_WARNING_PREFIX)
AC_SUBST(CFLAGS_WARNINGS_ARE_ERRORS)
@ -604,19 +617,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
ADLC_LANGSTD_CXXFLAGS="$LANGSTD_CXXFLAGS"
# CFLAGS WARNINGS STUFF
# Set JVM_CFLAGS warning handling
if test "x$TOOLCHAIN_TYPE" = xgcc; then
WARNING_CFLAGS_JDK_CONLY="$WARNINGS_ENABLE_ALL_CFLAGS"
WARNING_CFLAGS_JDK_CXXONLY="$WARNINGS_ENABLE_ALL_CXXFLAGS"
WARNING_CFLAGS_JVM="$WARNINGS_ENABLE_ALL_CXXFLAGS"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
WARNING_CFLAGS="$WARNINGS_ENABLE_ALL"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
WARNING_CFLAGS="$WARNINGS_ENABLE_ALL"
fi
WARNING_CFLAGS_JDK_CONLY="$WARNINGS_ENABLE_ALL"
WARNING_CFLAGS_JDK_CXXONLY="$WARNINGS_ENABLE_ALL_CXX"
WARNING_CFLAGS_JVM="$WARNINGS_ENABLE_ALL_JVM"
# Set some additional per-OS defines.
@ -878,12 +881,12 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
CFLAGS_JVM_COMMON="$ALWAYS_CFLAGS_JVM $ALWAYS_DEFINES_JVM \
$TOOLCHAIN_CFLAGS_JVM ${$1_TOOLCHAIN_CFLAGS_JVM} \
$OS_CFLAGS $OS_CFLAGS_JVM $CFLAGS_OS_DEF_JVM $DEBUG_CFLAGS_JVM \
$WARNING_CFLAGS $WARNING_CFLAGS_JVM $JVM_PICFLAG $FILE_MACRO_CFLAGS \
$WARNING_CFLAGS_JVM $JVM_PICFLAG $FILE_MACRO_CFLAGS \
$REPRODUCIBLE_CFLAGS $BRANCH_PROTECTION_CFLAGS"
CFLAGS_JDK_COMMON="$ALWAYS_DEFINES_JDK $TOOLCHAIN_CFLAGS_JDK \
$OS_CFLAGS $CFLAGS_OS_DEF_JDK $DEBUG_CFLAGS_JDK $DEBUG_OPTIONS_FLAGS_JDK \
$WARNING_CFLAGS $WARNING_CFLAGS_JDK $DEBUG_SYMBOLS_CFLAGS_JDK \
$DEBUG_SYMBOLS_CFLAGS_JDK \
$FILE_MACRO_CFLAGS $REPRODUCIBLE_CFLAGS $BRANCH_PROTECTION_CFLAGS"
# Use ${$2EXTRA_CFLAGS} to block EXTRA_CFLAGS to be added to build flags.

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
################################################################################
# Minimum supported versions
JTREG_MINIMUM_VERSION=8.1
JTREG_MINIMUM_VERSION=8.2.1
GTEST_MINIMUM_VERSION=1.14.0
################################################################################

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)
GTEST_VERSION=1.14.0
JTREG_VERSION=8.1+1
JTREG_VERSION=8.2.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
version: "8.1",
version: "8.2.1",
build_number: "1",
file: "bundles/jtreg-8.1+1.zip",
file: "bundles/jtreg-8.2.1+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),

View File

@ -78,7 +78,7 @@ else ifeq ($(BASE_OS), Fedora)
endif
BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
else
LATEST_ARCHIVED_OS_VERSION := 36
LATEST_ARCHIVED_OS_VERSION := 41
ifeq ($(filter aarch64 armhfp x86_64, $(ARCH)), )
FEDORA_TYPE := fedora-secondary
else

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
define_pd_global(size_t, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -664,16 +664,52 @@ void VM_Version::initialize() {
void VM_Version::insert_features_names(uint64_t features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
if (supports_feature(features, (VM_Version::Feature_Flag)i)) {
str = _features_names[i];
}
i += 1;
}
return (const char*)nullptr;
return str;
}, ", ");
}
void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
uint64_t features = *(uint64_t*)features_buffer;
insert_features_names(features, ss);
}
void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
uint64_t vm_features_set1 = *(uint64_t*)features_set1;
uint64_t vm_features_set2 = *(uint64_t*)features_set2;
int i = 0;
ss.join([&]() {
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
Feature_Flag flag = (Feature_Flag)i;
if (supports_feature(vm_features_set1, flag) && !supports_feature(vm_features_set2, flag)) {
str = _features_names[i];
}
i += 1;
}
return str;
}, ", ");
}
int VM_Version::cpu_features_size() {
return sizeof(_features);
}
void VM_Version::store_cpu_features(void* buf) {
*(uint64_t*)buf = _features;
}
bool VM_Version::supports_features(void* features_buffer) {
uint64_t features_to_test = *(uint64_t*)features_buffer;
return (_features & features_to_test) == features_to_test;
}
#if defined(LINUX)
static bool check_info_file(const char* fpath,
const char* virt1, VirtualizationType vt1,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -184,6 +184,9 @@ public:
static bool supports_feature(Feature_Flag flag) {
return (_features & BIT_MASK(flag)) != 0;
}
static bool supports_feature(uint64_t features, Feature_Flag flag) {
return (features & BIT_MASK(flag)) != 0;
}
static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; }
@ -244,6 +247,20 @@ public:
static bool use_neon_for_vector(int vector_length_in_bytes) {
return vector_length_in_bytes <= 16;
}
static void get_cpu_features_name(void* features_buffer, stringStream& ss);
// Returns names of features present in features_set1 but not in features_set2
static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
// Returns number of bytes required to store cpu features representation
static int cpu_features_size();
// Stores cpu features representation in the provided buffer. This representation is arch dependent.
// Size of the buffer must be same as returned by cpu_features_size()
static void store_cpu_features(void* buf);
static bool supports_features(void* features_to_test);
};
#endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
define_pd_global(bool, DelayCompilerStubsGeneration, false); // No need - only few compiler's stubs
define_pd_global(size_t, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(uint, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -60,7 +60,7 @@ define_pd_global(bool, VMContinuations, true);
// Use large code-entry alignment.
define_pd_global(size_t, CodeCacheSegmentSize, 128);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1500);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,11 +49,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
inline static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg,
Register temp_reg, Register temp2_reg) {
if (VerifyMethodHandles) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -129,6 +129,57 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
}
}
static void generate_post_barrier(MacroAssembler* masm,
const Register store_addr,
const Register new_val,
const Register thread,
const Register tmp1,
const Register tmp2,
Label& done,
bool new_val_may_be_null) {
__ block_comment("generate_post_barrier {");
assert(thread == Z_thread, "must be");
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
// Does store cross heap regions?
if (VM_Version::has_DistinctOpnds()) {
__ z_xgrk(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
} else {
__ z_lgr(tmp1, store_addr);
__ z_xgr(tmp1, new_val);
}
__ z_srag(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
__ branch_optimized(Assembler::bcondEqual, done);
// Crosses regions, storing null?
if (new_val_may_be_null) {
__ z_ltgr(new_val, new_val);
__ z_bre(done);
} else {
#ifdef ASSERT
__ z_ltgr(new_val, new_val);
__ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
}
__ z_srag(tmp1, store_addr, CardTable::card_shift());
Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
__ z_alg(tmp1, card_table_addr); // tmp1 := card address
if(UseCondCardMark) {
__ z_cli(0, tmp1, G1CardTable::clean_card_val());
__ branch_optimized(Assembler::bcondNotEqual, done);
}
static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
__ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
__ block_comment("} generate_post_barrier");
}
#if defined(COMPILER2)
#undef __
@ -204,57 +255,6 @@ void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
BLOCK_COMMENT("} generate_c2_pre_barrier_stub");
}
static void generate_post_barrier(MacroAssembler* masm,
const Register store_addr,
const Register new_val,
const Register thread,
const Register tmp1,
const Register tmp2,
Label& done,
bool new_val_may_be_null) {
__ block_comment("generate_post_barrier {");
assert(thread == Z_thread, "must be");
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
// Does store cross heap regions?
if (VM_Version::has_DistinctOpnds()) {
__ z_xgrk(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
} else {
__ z_lgr(tmp1, store_addr);
__ z_xgr(tmp1, new_val);
}
__ z_srag(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
__ branch_optimized(Assembler::bcondEqual, done);
// Crosses regions, storing null?
if (new_val_may_be_null) {
__ z_ltgr(new_val, new_val);
__ z_bre(done);
} else {
#ifdef ASSERT
__ z_ltgr(new_val, new_val);
__ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
}
__ z_srag(tmp1, store_addr, CardTable::card_shift());
Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
__ z_alg(tmp1, card_table_addr); // tmp1 := card address
if(UseCondCardMark) {
__ z_cli(0, tmp1, G1CardTable::clean_card_val());
__ branch_optimized(Assembler::bcondNotEqual, done);
}
static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
__ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
__ block_comment("} generate_post_barrier");
}
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
Register store_addr,
Register new_val,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -43,7 +43,7 @@ define_pd_global(size_t, CodeCacheSegmentSize, 256);
// Ideally, this is 256 (cache line size). This keeps code end data
// on separate lines. But we reduced it to 64 since 256 increased
// code size significantly by padding nops between IVC and second UEP.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 2);
define_pd_global(intx, InlineSmallCode, 2000);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,9 +46,9 @@ define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRES
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uint, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(uint, CodeEntryAlignment, 16);
#endif // COMPILER2_OR_JVMCI
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);

View File

@ -765,7 +765,7 @@ void MacroAssembler::align32() {
void MacroAssembler::align(uint modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
align(modulus, offset());
}

View File

@ -48,7 +48,7 @@ int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
#define DECLARE_CPU_FEATURE_NAME(id, name, bit) name,
#define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
#undef DECLARE_CPU_FEATURE_NAME
@ -1659,41 +1659,40 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
#ifdef COMPILER2
if (UseAVX > 2) {
if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
ArrayOperationPartialInlineSize != 0 &&
ArrayOperationPartialInlineSize != 16 &&
ArrayOperationPartialInlineSize != 32 &&
ArrayOperationPartialInlineSize != 64)) {
int inline_size = 0;
if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
inline_size = 64;
} else if (MaxVectorSize >= 32) {
inline_size = 32;
} else if (MaxVectorSize >= 16) {
inline_size = 16;
}
if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
}
ArrayOperationPartialInlineSize = inline_size;
}
if (ArrayOperationPartialInlineSize > MaxVectorSize) {
ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
if (ArrayOperationPartialInlineSize) {
warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
} else {
warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
}
}
}
#endif
}
#ifdef COMPILER2
if (UseAVX > 2) {
if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
ArrayOperationPartialInlineSize != 0 &&
ArrayOperationPartialInlineSize != 16 &&
ArrayOperationPartialInlineSize != 32 &&
ArrayOperationPartialInlineSize != 64)) {
int inline_size = 0;
if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
inline_size = 64;
} else if (MaxVectorSize >= 32) {
inline_size = 32;
} else if (MaxVectorSize >= 16) {
inline_size = 16;
}
if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
}
ArrayOperationPartialInlineSize = inline_size;
}
if (ArrayOperationPartialInlineSize > MaxVectorSize) {
ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
if (ArrayOperationPartialInlineSize) {
warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
} else {
warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
}
}
}
if (FLAG_IS_DEFAULT(OptimizeFill)) {
if (MaxVectorSize < 32 || (!EnableX86ECoreOpts && !VM_Version::supports_avx512vlbw())) {
OptimizeFill = false;
@ -3298,12 +3297,50 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (_features.supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
if (features.supports_feature((VM_Version::Feature_Flag)i)) {
str = _features_names[i];
}
i += 1;
}
return (const char*)nullptr;
return str;
}, ", ");
}
void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
VM_Features* features = (VM_Features*)features_buffer;
insert_features_names(*features, ss);
}
void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
VM_Features* vm_features_set1 = (VM_Features*)features_set1;
VM_Features* vm_features_set2 = (VM_Features*)features_set2;
int i = 0;
ss.join([&]() {
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
Feature_Flag flag = (Feature_Flag)i;
if (vm_features_set1->supports_feature(flag) && !vm_features_set2->supports_feature(flag)) {
str = _features_names[i];
}
i += 1;
}
return str;
}, ", ");
}
int VM_Version::cpu_features_size() {
return sizeof(VM_Features);
}
void VM_Version::store_cpu_features(void* buf) {
VM_Features copy = _features;
copy.clear_feature(CPU_HT); // HT does not result in incompatibility of aot code cache
memcpy(buf, &copy, sizeof(VM_Features));
}
bool VM_Version::supports_features(void* features_buffer) {
VM_Features* features_to_test = (VM_Features*)features_buffer;
return _features.supports_features(features_to_test);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -377,84 +377,84 @@ protected:
*/
enum Feature_Flag {
#define CPU_FEATURE_FLAGS(decl) \
decl(CX8, "cx8", 0) /* next bits are from cpuid 1 (EDX) */ \
decl(CMOV, "cmov", 1) \
decl(FXSR, "fxsr", 2) \
decl(HT, "ht", 3) \
decl(CX8, cx8, 0) /* next bits are from cpuid 1 (EDX) */ \
decl(CMOV, cmov, 1) \
decl(FXSR, fxsr, 2) \
decl(HT, ht, 3) \
\
decl(MMX, "mmx", 4) \
decl(3DNOW_PREFETCH, "3dnowpref", 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
decl(MMX, mmx, 4) \
decl(3DNOW_PREFETCH, 3dnowpref, 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
decl(SSE, "sse", 6) \
decl(SSE2, "sse2", 7) \
decl(SSE, sse, 6) \
decl(SSE2, sse2, 7) \
\
decl(SSE3, "sse3", 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, "ssse3", 9 ) \
decl(SSE4A, "sse4a", 10) \
decl(SSE4_1, "sse4.1", 11) \
decl(SSE3, sse3, 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, ssse3, 9 ) \
decl(SSE4A, sse4a, 10) \
decl(SSE4_1, sse4.1, 11) \
\
decl(SSE4_2, "sse4.2", 12) \
decl(POPCNT, "popcnt", 13) \
decl(LZCNT, "lzcnt", 14) \
decl(TSC, "tsc", 15) \
decl(SSE4_2, sse4.2, 12) \
decl(POPCNT, popcnt, 13) \
decl(LZCNT, lzcnt, 14) \
decl(TSC, tsc, 15) \
\
decl(TSCINV_BIT, "tscinvbit", 16) \
decl(TSCINV, "tscinv", 17) \
decl(AVX, "avx", 18) \
decl(AVX2, "avx2", 19) \
decl(TSCINV_BIT, tscinvbit, 16) \
decl(TSCINV, tscinv, 17) \
decl(AVX, avx, 18) \
decl(AVX2, avx2, 19) \
\
decl(AES, "aes", 20) \
decl(ERMS, "erms", 21) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, "clmul", 22) /* carryless multiply for CRC */ \
decl(BMI1, "bmi1", 23) \
decl(AES, aes, 20) \
decl(ERMS, erms, 21) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, clmul, 22) /* carryless multiply for CRC */ \
decl(BMI1, bmi1, 23) \
\
decl(BMI2, "bmi2", 24) \
decl(RTM, "rtm", 25) /* Restricted Transactional Memory instructions */ \
decl(ADX, "adx", 26) \
decl(AVX512F, "avx512f", 27) /* AVX 512bit foundation instructions */ \
decl(BMI2, bmi2, 24) \
decl(RTM, rtm, 25) /* Restricted Transactional Memory instructions */ \
decl(ADX, adx, 26) \
decl(AVX512F, avx512f, 27) /* AVX 512bit foundation instructions */ \
\
decl(AVX512DQ, "avx512dq", 28) \
decl(AVX512PF, "avx512pf", 29) \
decl(AVX512ER, "avx512er", 30) \
decl(AVX512CD, "avx512cd", 31) \
decl(AVX512DQ, avx512dq, 28) \
decl(AVX512PF, avx512pf, 29) \
decl(AVX512ER, avx512er, 30) \
decl(AVX512CD, avx512cd, 31) \
\
decl(AVX512BW, "avx512bw", 32) /* Byte and word vector instructions */ \
decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \
decl(SHA, "sha", 34) /* SHA instructions */ \
decl(FMA, "fma", 35) /* FMA instructions */ \
decl(AVX512BW, avx512bw, 32) /* Byte and word vector instructions */ \
decl(AVX512VL, avx512vl, 33) /* EVEX instructions with smaller vector length */ \
decl(SHA, sha, 34) /* SHA instructions */ \
decl(FMA, fma, 35) /* FMA instructions */ \
\
decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \
decl(VZEROUPPER, vzeroupper, 36) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq, 37) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq, 38) /* Vector carryless multiplication */ \
decl(AVX512_VAES, avx512_vaes, 39) /* Vector AES instruction */ \
\
decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \
decl(FLUSH, "clflush", 41) /* flush instruction */ \
decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \
decl(CLWB, "clwb", 43) /* clwb instruction */ \
decl(AVX512_VNNI, avx512_vnni, 40) /* Vector Neural Network Instructions */ \
decl(FLUSH, clflush, 41) /* flush instruction */ \
decl(FLUSHOPT, clflushopt, 42) /* flusopth instruction */ \
decl(CLWB, clwb, 43) /* clwb instruction */ \
\
decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \
decl(HV, "hv", 46) /* Hypervisor instructions */ \
decl(SERIALIZE, "serialize", 47) /* CPU SERIALIZE */ \
decl(RDTSCP, "rdtscp", 48) /* RDTSCP instruction */ \
decl(RDPID, "rdpid", 49) /* RDPID instruction */ \
decl(FSRM, "fsrm", 50) /* Fast Short REP MOV */ \
decl(GFNI, "gfni", 51) /* Vector GFNI instructions */ \
decl(AVX512_BITALG, "avx512_bitalg", 52) /* Vector sub-word popcount and bit gather instructions */\
decl(F16C, "f16c", 53) /* Half-precision and single precision FP conversion instructions*/ \
decl(PKU, "pku", 54) /* Protection keys for user-mode pages */ \
decl(OSPKE, "ospke", 55) /* OS enables protection keys */ \
decl(CET_IBT, "cet_ibt", 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
decl(CET_SS, "cet_ss", 57) /* Control Flow Enforcement - Shadow Stack */ \
decl(AVX512_IFMA, "avx512_ifma", 58) /* Integer Vector FMA instructions*/ \
decl(AVX_IFMA, "avx_ifma", 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
decl(APX_F, "apx_f", 60) /* Intel Advanced Performance Extensions*/ \
decl(SHA512, "sha512", 61) /* SHA512 instructions*/ \
decl(AVX512_FP16, "avx512_fp16", 62) /* AVX512 FP16 ISA support*/ \
decl(AVX10_1, "avx10_1", 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
decl(AVX10_2, "avx10_2", 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
decl(HYBRID, "hybrid", 65) /* Hybrid architecture */
decl(AVX512_VBMI2, avx512_vbmi2, 44) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, avx512_vbmi, 45) /* Vector BMI instructions */ \
decl(HV, hv, 46) /* Hypervisor instructions */ \
decl(SERIALIZE, serialize, 47) /* CPU SERIALIZE */ \
decl(RDTSCP, rdtscp, 48) /* RDTSCP instruction */ \
decl(RDPID, rdpid, 49) /* RDPID instruction */ \
decl(FSRM, fsrm, 50) /* Fast Short REP MOV */ \
decl(GFNI, gfni, 51) /* Vector GFNI instructions */ \
decl(AVX512_BITALG, avx512_bitalg, 52) /* Vector sub-word popcount and bit gather instructions */\
decl(F16C, f16c, 53) /* Half-precision and single precision FP conversion instructions*/ \
decl(PKU, pku, 54) /* Protection keys for user-mode pages */ \
decl(OSPKE, ospke, 55) /* OS enables protection keys */ \
decl(CET_IBT, cet_ibt, 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
decl(CET_SS, cet_ss, 57) /* Control Flow Enforcement - Shadow Stack */ \
decl(AVX512_IFMA, avx512_ifma, 58) /* Integer Vector FMA instructions*/ \
decl(AVX_IFMA, avx_ifma, 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
decl(APX_F, apx_f, 60) /* Intel Advanced Performance Extensions*/ \
decl(SHA512, sha512, 61) /* SHA512 instructions*/ \
decl(AVX512_FP16, avx512_fp16, 62) /* AVX512 FP16 ISA support*/ \
decl(AVX10_1, avx10_1, 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
decl(AVX10_2, avx10_2, 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
decl(HYBRID, hybrid, 65) /* Hybrid architecture */
// Expands one CPU_FEATURE_FLAGS row (id, name, bit) into an enumerator
// CPU_<id> whose numerical value is the feature's bit position.
// NOTE(review): the enclosing enum declaration is not visible here — presumably
// these become members of the CPU feature enum; confirm against the full header.
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (bit),
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
@ -516,6 +516,15 @@ protected:
int idx = index(feature);
return (_features_bitmap[idx] & bit_mask(feature)) != 0;
}
// Returns true iff every feature bit set in features_to_test is also set in
// this bitmap, i.e. features_to_test describes a subset of these features.
bool supports_features(VM_Features* features_to_test) {
  const int num_elements = features_bitmap_element_count();
  for (int idx = 0; idx < num_elements; idx++) {
    if ((_features_bitmap[idx] & features_to_test->_features_bitmap[idx]) !=
        features_to_test->_features_bitmap[idx]) {
      // features_to_test has at least one bit we do not have.
      return false;
    }
  }
  return true;
}
};
// CPU feature flags vector, can be affected by VM settings.
@ -1103,6 +1112,20 @@ public:
static bool supports_tscinv_ext(void);
static void initialize_cpu_information(void);
static void get_cpu_features_name(void* features_buffer, stringStream& ss);
// Returns names of features present in features_set1 but not in features_set2
static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
// Returns number of bytes required to store cpu features representation
static int cpu_features_size();
// Stores cpu features representation in the provided buffer. This representation is arch dependent.
// Size of the buffer must be same as returned by cpu_features_size()
static void store_cpu_features(void* buf);
static bool supports_features(void* features_to_test);
};
#endif // CPU_X86_VM_VERSION_X86_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true);
define_pd_global(bool, DelayCompilerStubsGeneration, false); // Don't have compiler's stubs
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uint, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);

View File

@ -1753,10 +1753,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return true;
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::remove_stack_guard_pages(char* addr, size_t size) {
// Do not call this; no need to commit stack pages on AIX.
ShouldNotReachHere();
return true;
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {

View File

@ -143,12 +143,6 @@ static OSReturn get_jvm_load(double* jvm_uload, double* jvm_sload) {
return OS_OK;
}
// Copies the time snapshot in 'from' over 'to'; a no-op when either
// pointer is missing.
static void update_prev_time(jvm_time_store_t* from, jvm_time_store_t* to) {
  if (from != nullptr && to != nullptr) {
    memcpy(to, from, sizeof(jvm_time_store_t));
  }
}
static void update_prev_ticks(cpu_tick_store_t* from, cpu_tick_store_t* to) {
if (from && to) {
memcpy(to, from, sizeof(cpu_tick_store_t));

View File

@ -1782,10 +1782,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
void os::remove_stack_guard_pages(char* addr, size_t size) {
os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or

View File

@ -3523,6 +3523,9 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
log_trace(os, map)("mmap failed: " RANGEFMT " errno=(%s)",
RANGEFMTARGS(addr, size),
os::strerror(ep.saved_errno()));
if (ep.saved_errno() == ENOMEM) {
fatal("Failed to uncommit " RANGEFMT ". It is possible that the process's maximum number of mappings would have been exceeded. Try increasing the limit.", RANGEFMTARGS(addr, size));
}
return false;
}
return true;
@ -3633,14 +3636,16 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
// It's safe to always unmap guard pages for primordial thread because we
// always place it right after end of the mapped region.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
void os::remove_stack_guard_pages(char* addr, size_t size) {
if (os::is_primordial_thread()) {
return ::munmap(addr, size) == 0;
if (::munmap(addr, size) != 0) {
fatal("Failed to munmap " RANGEFMT, RANGEFMTARGS(addr, size));
}
return;
}
return os::uncommit_memory(addr, size);
os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or

View File

@ -3281,11 +3281,10 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Do manual alignment
aligned_base = align_up(extra_base, alignment);
bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
os::release_memory(extra_base, extra_size);
assert(rc, "release failed");
if (!rc) {
return nullptr;
if (file_desc != -1) {
os::unmap_memory(extra_base, extra_size);
} else {
os::release_memory(extra_base, extra_size);
}
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
@ -3681,8 +3680,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
void os::remove_stack_guard_pages(char* addr, size_t size) {
os::uncommit_memory(addr, size);
}
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {

View File

@ -136,9 +136,9 @@ void *AdlArena::Acalloc( size_t items, size_t x ) {
}
//------------------------------realloc----------------------------------------
static size_t pointer_delta(const void *left, const void *right) {
assert(left >= right, "pointer delta underflow");
return (uintptr_t)left - (uintptr_t)right;
// Number of bytes from low (inclusive) up to high; asserts that high >= low
// so the unsigned subtraction cannot wrap.
static size_t pointer_delta(const void* high, const void* low) {
  assert(high >= low, "pointer delta underflow");
  const uintptr_t hi = (uintptr_t)high;
  const uintptr_t lo = (uintptr_t)low;
  return hi - lo;
}
// Reallocate storage in AdlArena.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -468,9 +468,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
assert(!_finalize_stubs, "non-finalized stubs");
{
// not sure why this is here, but why not...
int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
assert( (dest->_total_start - _insts.start()) % CodeEntryAlignment == 0, "copy must preserve alignment");
}
const CodeSection* prev_cs = nullptr;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -426,7 +426,8 @@ bool AOTClassLocation::check(const char* runtime_path, bool has_aot_linked_class
bool size_differs = _filesize != st.st_size;
bool time_differs = _check_time && (_timestamp != st.st_mtime);
if (size_differs || time_differs) {
aot_log_warning(aot)("This file is not the one used while building the shared archive file: '%s'%s%s",
aot_log_warning(aot)("This file is not the one used while building the %s: '%s'%s%s",
CDSConfig::type_of_archive_being_loaded(),
runtime_path,
time_differs ? ", timestamp has changed" : "",
size_differs ? ", size has changed" : "");
@ -448,6 +449,13 @@ void AOTClassLocationConfig::dumptime_init(JavaThread* current) {
java_lang_Throwable::print(current->pending_exception(), tty);
vm_exit_during_initialization("AOTClassLocationConfig::dumptime_init_helper() failed unexpectedly");
}
if (CDSConfig::is_dumping_final_static_archive()) {
// The _max_used_index is usually updated by ClassLoader::record_result(). However,
// when dumping the final archive, the classes are loaded from their images in
// the AOT config file, so we don't go through ClassLoader::record_result().
dumptime_update_max_used_index(runtime()->_max_used_index); // Same value as recorded in the training run.
}
}
void AOTClassLocationConfig::dumptime_init_helper(TRAPS) {

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
// Byte offset of p, as computed by the currently active ArchiveBuilder
// (see ArchiveBuilder::any_to_offset for the accepted address ranges).
size_t AOTCompressedPointers::compute_byte_offset(address p) {
  ArchiveBuilder* builder = ArchiveBuilder::current();
  return builder->any_to_offset(p);
}

View File

@ -0,0 +1,142 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
#define SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
#include "cds/cds_globals.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspace.hpp"
#include "metaprogramming/enableIf.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Static utility for encoding/decoding 32-bit compressed pointers to metadata
// in the AOT cache. An encoded value is a byte offset from the bottom of the
// mapped AOT metaspace; offset 0 is reserved to represent nullptr.
class AOTCompressedPointers: public AllStatic {
public:
  // For space saving, we can encode the location of metadata objects in the "rw" and "ro"
  // regions using a 32-bit offset from the bottom of the mapped AOT metaspace.
  // Currently we allow only up to 2GB total size in the rw and ro regions (which are
  // contiguous to each other).
  enum class narrowPtr : u4;

  // Largest representable offset (2GB - 1); enforced in encode_byte_offset().
  static constexpr size_t MaxMetadataOffsetBytes = 0x7FFFFFFF;

  // Returns the byte offset encoded in narrowp.
  // In the future, this could return a different numerical value than
  // narrowp if the encoding contains shifts.
  inline static size_t get_byte_offset(narrowPtr narrowp) {
    return checked_cast<size_t>(narrowp);
  }

  // The encoding of nullptr: offset 0 (which never denotes a valid object).
  inline static narrowPtr null() {
    return static_cast<narrowPtr>(0);
  }

  // Encoding ------

  // ptr can point to one of the following
  // - an object in the ArchiveBuilder's buffer.
  // - an object in the currently mapped AOT cache rw/ro regions.
  // - an object that has been copied into the ArchiveBuilder's buffer.
  template <typename T>
  static narrowPtr encode_not_null(T ptr) {
    address p = reinterpret_cast<address>(ptr);
    return encode_byte_offset(compute_byte_offset(p));
  }

  // Like encode_not_null(), but maps nullptr to null().
  template <typename T>
  static narrowPtr encode(T ptr) { // may be null
    if (ptr == nullptr) {
      return null();
    } else {
      return encode_not_null(ptr);
    }
  }

  // ptr must be in the currently mapped AOT cache rw/ro regions.
  // Encodes the offset of ptr from SharedBaseAddress directly, without
  // consulting the ArchiveBuilder.
  template <typename T>
  static narrowPtr encode_address_in_cache(T ptr) {
    assert(Metaspace::in_aot_cache(ptr), "must be");
    address p = reinterpret_cast<address>(ptr);
    address base = reinterpret_cast<address>(SharedBaseAddress);
    return encode_byte_offset(pointer_delta(p, base, 1));
  }

  // Like encode_address_in_cache(), but maps nullptr to null().
  template <typename T>
  static narrowPtr encode_address_in_cache_or_null(T ptr) {
    if (ptr == nullptr) {
      return null();
    } else {
      return encode_address_in_cache<T>(ptr);
    }
  }

  // Decoding -----

  // If base_address is null, decode an address within the mapped aot cache range.
  // Otherwise decode relative to the caller-supplied base_address.
  template <typename T>
  static T decode_not_null(narrowPtr narrowp, address base_address = nullptr) {
    assert(narrowp != null(), "sanity");
    if (base_address == nullptr) {
      T p = reinterpret_cast<T>(reinterpret_cast<address>(SharedBaseAddress) + get_byte_offset(narrowp));
      assert(Metaspace::in_aot_cache(p), "must be");
      return p;
    } else {
      // This is usually called before the cache is fully mapped.
      return reinterpret_cast<T>(base_address + get_byte_offset(narrowp));
    }
  }

  // Like decode_not_null(), but maps null() to nullptr.
  template <typename T>
  static T decode(narrowPtr narrowp, address base_address = nullptr) { // may be null
    if (narrowp == null()) {
      return nullptr;
    } else {
      return decode_not_null<T>(narrowp, base_address);
    }
  }

private:
  // Byte offset for an arbitrary (buffered/mapped/source) address;
  // implemented out-of-line via ArchiveBuilder::any_to_offset().
  static size_t compute_byte_offset(address p);

  // Range-checks offset and casts it to narrowPtr. Offset 0 is rejected
  // because it is reserved for the nullptr encoding (protection zone).
  static narrowPtr encode_byte_offset(size_t offset) {
    assert(offset != 0, "offset 0 is in protection zone");
    precond(offset <= MaxMetadataOffsetBytes);
    return checked_cast<narrowPtr>(offset);
  }
};
// Type casts -- declared as global functions to save a few keystrokes
// A simple type cast. No change in numerical value.
// A simple type cast from a raw u4 to narrowPtr. No change in numerical value.
inline AOTCompressedPointers::narrowPtr cast_from_u4(u4 raw_value) {
  using narrowPtr = AOTCompressedPointers::narrowPtr;
  return checked_cast<narrowPtr>(raw_value);
}
// A simple type cast. No change in numerical value.
// !!!DO NOT CALL THIS if you want a byte offset!!!
// A simple type cast from narrowPtr to its raw u4 representation.
// No change in numerical value.
// !!!DO NOT CALL THIS if you want a byte offset!!! Use
// AOTCompressedPointers::get_byte_offset() for that.
inline u4 cast_to_u4(AOTCompressedPointers::narrowPtr narrowp) {
  const u4 raw_value = checked_cast<u4>(narrowp);
  return raw_value;
}
#endif // SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP

View File

@ -2106,7 +2106,7 @@ MapArchiveResult AOTMetaspace::map_archive(FileMapInfo* mapinfo, char* mapped_ba
// Currently, only static archive uses early serialized data.
char* buffer = mapinfo->early_serialized_data();
intptr_t* array = (intptr_t*)buffer;
ReadClosure rc(&array, (intptr_t)mapped_base_address);
ReadClosure rc(&array, (address)mapped_base_address);
early_serialize(&rc);
}
@ -2152,7 +2152,7 @@ void AOTMetaspace::initialize_shared_spaces() {
// shared string/symbol tables.
char* buffer = static_mapinfo->serialized_data();
intptr_t* array = (intptr_t*)buffer;
ReadClosure rc(&array, (intptr_t)SharedBaseAddress);
ReadClosure rc(&array, (address)SharedBaseAddress);
serialize(&rc);
// Finish initializing the heap dump mode used in the archive
@ -2164,7 +2164,7 @@ void AOTMetaspace::initialize_shared_spaces() {
if (dynamic_mapinfo != nullptr) {
intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
ReadClosure rc(&buffer, (intptr_t)SharedBaseAddress);
ReadClosure rc(&buffer, (address)SharedBaseAddress);
DynamicArchive::serialize(&rc);
}

View File

@ -24,6 +24,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/aotMetaspace.hpp"
@ -175,10 +176,10 @@ ArchiveBuilder::ArchiveBuilder() :
_mapped_static_archive_bottom(nullptr),
_mapped_static_archive_top(nullptr),
_buffer_to_requested_delta(0),
_pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
_rw_region("rw", MAX_SHARED_DELTA),
_ro_region("ro", MAX_SHARED_DELTA),
_ac_region("ac", MAX_SHARED_DELTA),
_pz_region("pz"), // protection zone -- used only during dumping; does NOT exist in cds archive.
_rw_region("rw"),
_ro_region("ro"),
_ac_region("ac"),
_ptrmap(mtClassShared),
_rw_ptrmap(mtClassShared),
_ro_ptrmap(mtClassShared),
@ -990,16 +991,15 @@ void ArchiveBuilder::make_training_data_shareable() {
_src_obj_table.iterate_all(clean_td);
}
uintx ArchiveBuilder::buffer_to_offset(address p) const {
size_t ArchiveBuilder::buffer_to_offset(address p) const {
address requested_p = to_requested(p);
assert(requested_p >= _requested_static_archive_bottom, "must be");
return requested_p - _requested_static_archive_bottom;
return pointer_delta(requested_p, _requested_static_archive_bottom, 1);
}
uintx ArchiveBuilder::any_to_offset(address p) const {
size_t ArchiveBuilder::any_to_offset(address p) const {
if (is_in_mapped_static_archive(p)) {
assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
return p - _mapped_static_archive_bottom;
return pointer_delta(p, _mapped_static_archive_bottom, 1);
}
if (!is_in_buffer_space(p)) {
// p must be a "source" address
@ -1008,7 +1008,7 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
return buffer_to_offset(p);
}
address ArchiveBuilder::offset_to_buffered_address(u4 offset) const {
address ArchiveBuilder::offset_to_buffered_address(size_t offset) const {
address requested_addr = _requested_static_archive_bottom + offset;
address buffered_addr = requested_addr - _buffer_to_requested_delta;
assert(is_in_buffer_space(buffered_addr), "bad offset");

View File

@ -329,49 +329,22 @@ public:
return current()->buffer_to_requested_delta();
}
inline static u4 to_offset_u4(uintx offset) {
guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset " INTPTR_FORMAT, offset);
return (u4)offset;
}
public:
static const uintx MAX_SHARED_DELTA = ArchiveUtils::MAX_SHARED_DELTA;;
// The address p points to an object inside the output buffer. When the archive is mapped
// at the requested address, what's the offset of this object from _requested_static_archive_bottom?
uintx buffer_to_offset(address p) const;
size_t buffer_to_offset(address p) const;
// Same as buffer_to_offset, except that the address p points to either (a) an object
// inside the output buffer, or (b), an object in the currently mapped static archive.
uintx any_to_offset(address p) const;
// Same as buffer_to_offset, except that the address p points to one of the following:
// - an object in the ArchiveBuilder's buffer.
// - an object in the currently mapped AOT cache rw/ro regions.
// - an object that has been copied into the ArchiveBuilder's buffer.
size_t any_to_offset(address p) const;
// The reverse of buffer_to_offset()
address offset_to_buffered_address(u4 offset) const;
address offset_to_buffered_address(size_t offset) const;
template <typename T>
u4 buffer_to_offset_u4(T p) const {
uintx offset = buffer_to_offset((address)p);
return to_offset_u4(offset);
}
template <typename T>
u4 any_to_offset_u4(T p) const {
assert(p != nullptr, "must not be null");
uintx offset = any_to_offset((address)p);
return to_offset_u4(offset);
}
template <typename T>
u4 any_or_null_to_offset_u4(T p) const {
if (p == nullptr) {
return 0;
} else {
return any_to_offset_u4<T>(p);
}
}
template <typename T>
T offset_to_buffered(u4 offset) const {
T offset_to_buffered(size_t offset) const {
return (T)offset_to_buffered_address(offset);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,6 +22,7 @@
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@ -201,13 +202,13 @@ char* DumpRegion::expand_top_to(char* newtop) {
commit_to(newtop);
_top = newtop;
if (_max_delta > 0) {
if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space(_base)) {
uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
if (delta > _max_delta) {
if (delta > AOTCompressedPointers::MaxMetadataOffsetBytes) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
aot_log_error(aot)("Out of memory in the CDS archive: Please reduce the number of shared classes.");
aot_log_error(aot)("Out of memory in the %s: Please reduce the number of shared classes.", CDSConfig::type_of_archive_being_written());
AOTMetaspace::unrecoverable_writing_error();
}
}
@ -331,9 +332,8 @@ void WriteClosure::do_ptr(void** p) {
void ReadClosure::do_ptr(void** p) {
assert(*p == nullptr, "initializing previous initialized pointer.");
intptr_t obj = nextPtr();
assert(obj >= 0, "sanity.");
*p = (obj != 0) ? (void*)(_base_address + obj) : (void*)obj;
u4 narrowp = checked_cast<u4>(nextPtr());
*p = AOTCompressedPointers::decode<void*>(cast_from_u4(narrowp), _base_address);
}
void ReadClosure::do_u4(u4* p) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -153,7 +153,6 @@ private:
char* _base;
char* _top;
char* _end;
uintx _max_delta;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;
@ -161,9 +160,9 @@ private:
void commit_to(char* newtop);
public:
DumpRegion(const char* name, uintx max_delta = 0)
DumpRegion(const char* name)
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
_max_delta(max_delta), _is_packed(false),
_is_packed(false),
_rs(nullptr), _vs(nullptr) {}
char* expand_top_to(char* newtop);
@ -237,13 +236,13 @@ public:
class ReadClosure : public SerializeClosure {
private:
intptr_t** _ptr_array;
intptr_t _base_address;
address _base_address;
inline intptr_t nextPtr() {
return *(*_ptr_array)++;
}
public:
ReadClosure(intptr_t** ptr_array, intptr_t base_address) :
ReadClosure(intptr_t** ptr_array, address base_address) :
_ptr_array(ptr_array), _base_address(base_address) {}
void do_ptr(void** p);
@ -260,7 +259,6 @@ class ArchiveUtils {
template <typename T> static Array<T>* archive_ptr_array(GrowableArray<T>* tmp_array);
public:
static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
static void log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) NOT_CDS_RETURN;
static bool has_aot_initialized_mirror(InstanceKlass* src_ik);
@ -273,50 +271,6 @@ public:
static Array<T>* archive_array(GrowableArray<T>* tmp_array) {
return archive_ptr_array(tmp_array);
}
// The following functions translate between a u4 offset and an address in the
// the range of the mapped CDS archive (e.g., Metaspace::in_aot_cache()).
// Since the first 16 bytes in this range are dummy data (see ArchiveBuilder::reserve_buffer()),
// we know that offset 0 never represents a valid object. As a result, an offset of 0
// is used to encode a nullptr.
//
// Use the "archived_address_or_null" variants if a nullptr may be encoded.
// offset must represent an object of type T in the mapped shared space. Return
// a direct pointer to this object.
template <typename T> T static offset_to_archived_address(u4 offset) {
assert(offset != 0, "sanity");
T p = (T)(SharedBaseAddress + offset);
assert(Metaspace::in_aot_cache(p), "must be");
return p;
}
template <typename T> T static offset_to_archived_address_or_null(u4 offset) {
if (offset == 0) {
return nullptr;
} else {
return offset_to_archived_address<T>(offset);
}
}
// p must be an archived object. Get its offset from SharedBaseAddress
template <typename T> static u4 archived_address_to_offset(T p) {
uintx pn = (uintx)p;
uintx base = (uintx)SharedBaseAddress;
assert(Metaspace::in_aot_cache(p), "must be");
assert(pn > base, "sanity"); // No valid object is stored at 0 offset from SharedBaseAddress
uintx offset = pn - base;
assert(offset <= MAX_SHARED_DELTA, "range check");
return static_cast<u4>(offset);
}
template <typename T> static u4 archived_address_or_null_to_offset(T p) {
if (p == nullptr) {
return 0;
} else {
return archived_address_to_offset<T>(p);
}
}
};
class HeapRootSegments {

View File

@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@ -75,13 +76,13 @@ public:
return 0;
}
u4 a_offset = ArchiveBuilder::current()->any_to_offset_u4(a_name);
u4 b_offset = ArchiveBuilder::current()->any_to_offset_u4(b_name);
u4 a_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(a_name));
u4 b_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(b_name));
if (a_offset < b_offset) {
if (a_narrowp < b_narrowp) {
return -1;
} else {
assert(a_offset > b_offset, "must be");
assert(a_narrowp > b_narrowp, "must be");
return 1;
}
}

View File

@ -298,11 +298,11 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
st->print_cr("- cloned_vtables_offset: 0x%zx", _cloned_vtables_offset);
st->print_cr("- early_serialized_data_offset: 0x%zx", _early_serialized_data_offset);
st->print_cr("- serialized_data_offset: 0x%zx", _serialized_data_offset);
st->print_cr("- cloned_vtables: %u", cast_to_u4(_cloned_vtables));
st->print_cr("- early_serialized_data: %u", cast_to_u4(_early_serialized_data));
st->print_cr("- serialized_data: %u", cast_to_u4(_serialized_data));
st->print_cr("- jvm_ident: %s", _jvm_ident);
st->print_cr("- class_location_config_offset: 0x%zx", _class_location_config_offset);
st->print_cr("- class_location_config: %d", cast_to_u4(_class_location_config));
st->print_cr("- verify_local: %d", _verify_local);
st->print_cr("- verify_remote: %d", _verify_remote);
st->print_cr("- has_platform_or_app_classes: %d", _has_platform_or_app_classes);
@ -1325,9 +1325,7 @@ char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
aot_log_error(aot)("region %d CRC error", region_index);
if (!os::unmap_memory(mapped_base, r->used_aligned())) {
fatal("os::unmap_memory of region %d failed", region_index);
}
os::unmap_memory(mapped_base, r->used_aligned());
return nullptr;
}
@ -1654,9 +1652,7 @@ void FileMapInfo::unmap_region(int i) {
// is released. Zero it so that we don't accidentally read its content.
aot_log_info(aot)("Region #%d (%s) is in a reserved space, it will be freed when the space is released", i, shared_region_name[i]);
} else {
if (!os::unmap_memory(mapped_base, size)) {
fatal("os::unmap_memory failed");
}
os::unmap_memory(mapped_base, size);
}
}
r->set_mapped_base(nullptr);
@ -1767,10 +1763,6 @@ void FileMapInfo::print(outputStream* st) const {
}
}
void FileMapHeader::set_as_offset(char* p, size_t *offset) {
*offset = ArchiveBuilder::current()->any_to_offset((address)p);
}
int FileMapHeader::compute_crc() {
char* start = (char*)this;
// start computing from the field after _header_size to end of base archive name.

View File

@ -25,6 +25,7 @@
#ifndef SHARE_CDS_FILEMAP_HPP
#define SHARE_CDS_FILEMAP_HPP
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMappedHeap.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotStreamedHeap.hpp"
@ -104,7 +105,7 @@ public:
class FileMapHeader: private CDSFileMapHeaderBase {
friend class CDSConstants;
friend class VMStructs;
using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
// The following fields record the states of the VM during dump time.
// They are compared with the runtime states to see if the archive
@ -122,16 +123,16 @@ private:
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
int _narrow_klass_pointer_bits; // save number of bits in narrowKlass
int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects
size_t _cloned_vtables_offset; // The address of the first cloned vtable
size_t _early_serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
narrowPtr _cloned_vtables; // The address of the first cloned vtable
narrowPtr _early_serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
narrowPtr _serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
char _jvm_ident[JVM_IDENT_MAX]; // identifier string of the jvm that created this dump
size_t _class_location_config_offset;
narrowPtr _class_location_config;
bool _verify_local; // BytecodeVerificationLocal setting
bool _verify_remote; // BytecodeVerificationRemote setting
@ -160,12 +161,8 @@ private:
bool _type_profile_casts;
int _spec_trap_limit_extra_entries;
template <typename T> T from_mapped_offset(size_t offset) const {
return (T)(mapped_base_address() + offset);
}
void set_as_offset(char* p, size_t *offset);
template <typename T> void set_as_offset(T p, size_t *offset) {
set_as_offset((char*)p, offset);
template <typename T> T decode(narrowPtr narrowp) const {
return AOTCompressedPointers::decode_not_null<T>(narrowp, reinterpret_cast<address>(mapped_base_address()));
}
public:
@ -193,9 +190,9 @@ public:
bool compact_headers() const { return _compact_headers; }
uintx max_heap_size() const { return _max_heap_size; }
CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; }
char* cloned_vtables() const { return from_mapped_offset<char*>(_cloned_vtables_offset); }
char* early_serialized_data() const { return from_mapped_offset<char*>(_early_serialized_data_offset); }
char* serialized_data() const { return from_mapped_offset<char*>(_serialized_data_offset); }
char* cloned_vtables() const { return decode<char*>(_cloned_vtables); }
char* early_serialized_data() const { return decode<char*>(_early_serialized_data); }
char* serialized_data() const { return decode<char*>(_serialized_data); }
bool object_streaming_mode() const { return _object_streaming_mode; }
const char* jvm_ident() const { return _jvm_ident; }
char* requested_base_address() const { return _requested_base_address; }
@ -218,9 +215,9 @@ public:
void set_mapped_heap_header(AOTMappedHeapHeader header) { _mapped_heap_header = header; }
void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
void set_early_serialized_data(char* p) { set_as_offset(p, &_early_serialized_data_offset); }
void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
void set_cloned_vtables(char* p) { _cloned_vtables = AOTCompressedPointers::encode_not_null(p); }
void set_early_serialized_data(char* p) { _early_serialized_data = AOTCompressedPointers::encode_not_null(p); }
void set_serialized_data(char* p) { _serialized_data = AOTCompressedPointers::encode_not_null(p); }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_rw_ptrmap_start_pos(size_t n) { _rw_ptrmap_start_pos = n; }
void set_ro_ptrmap_start_pos(size_t n) { _ro_ptrmap_start_pos = n; }
@ -228,11 +225,11 @@ public:
void copy_base_archive_name(const char* name);
void set_class_location_config(AOTClassLocationConfig* table) {
set_as_offset(table, &_class_location_config_offset);
_class_location_config = AOTCompressedPointers::encode_not_null(table);
}
AOTClassLocationConfig* class_location_config() {
return from_mapped_offset<AOTClassLocationConfig*>(_class_location_config_offset);
return decode<AOTClassLocationConfig*>(_class_location_config);
}
void set_requested_base(char* b) {

View File

@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
@ -1148,8 +1149,7 @@ public:
ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
_writer->add(hash, delta);
_writer->add(hash, AOTCompressedPointers::encode_not_null(record));
}
return true; // keep on iterating
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
@ -52,7 +53,7 @@
#include "runtime/mutexLocker.hpp"
GrowableArrayCHeap<char*, mtClassShared>* LambdaFormInvokers::_lambdaform_lines = nullptr;
Array<u4>* LambdaFormInvokers::_static_archive_invokers = nullptr;
Array<AOTCompressedPointers::narrowPtr>* LambdaFormInvokers::_static_archive_invokers = nullptr;
static bool _stop_appending = false;
#define NUM_FILTER 4
@ -252,7 +253,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
}
}
if (count > 0) {
_static_archive_invokers = ArchiveBuilder::new_ro_array<u4>(count);
_static_archive_invokers = ArchiveBuilder::new_ro_array<narrowPtr>(count);
int index = 0;
for (int i = 0; i < len; i++) {
char* str = _lambdaform_lines->at(i);
@ -261,7 +262,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
Array<char>* line = ArchiveBuilder::new_ro_array<char>((int)str_len);
strncpy(line->adr_at(0), str, str_len);
_static_archive_invokers->at_put(index, ArchiveBuilder::current()->any_to_offset_u4(line));
_static_archive_invokers->at_put(index, AOTCompressedPointers::encode_not_null(line));
index++;
}
}
@ -274,8 +275,8 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
void LambdaFormInvokers::read_static_archive_invokers() {
if (_static_archive_invokers != nullptr) {
for (int i = 0; i < _static_archive_invokers->length(); i++) {
u4 offset = _static_archive_invokers->at(i);
Array<char>* line = ArchiveUtils::offset_to_archived_address<Array<char>*>(offset);
narrowPtr encoded = _static_archive_invokers->at(i);
Array<char>* line = AOTCompressedPointers::decode_not_null<Array<char>*>(encoded);
char* str = line->adr_at(0);
append(str);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,8 @@
#ifndef SHARE_CDS_LAMBDAFORMINVOKERS_HPP
#define SHARE_CDS_LAMBDAFORMINVOKERS_HPP
#include "cds/aotCompressedPointers.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/handles.hpp"
@ -35,10 +37,11 @@ class Array;
class SerializeClosure;
class LambdaFormInvokers : public AllStatic {
using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
static GrowableArrayCHeap<char*, mtClassShared>* _lambdaform_lines;
// For storing LF form lines (LF_RESOLVE only) in read only table.
static Array<u4>* _static_archive_invokers;
static Array<narrowPtr>* _static_archive_invokers;
static void regenerate_class(char* name, ClassFileStream& st, TRAPS);
public:
static void append(char* line);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsProtectionDomain.hpp"
@ -49,11 +50,11 @@ unsigned int LambdaProxyClassKey::hash() const {
}
unsigned int RunTimeLambdaProxyClassKey::hash() const {
return primitive_hash<u4>(_caller_ik) +
primitive_hash<u4>(_invoked_name) +
primitive_hash<u4>(_invoked_type) +
primitive_hash<u4>(_method_type) +
primitive_hash<u4>(_instantiated_method_type);
return primitive_hash<u4>(cast_to_u4(_caller_ik)) +
primitive_hash<u4>(cast_to_u4(_invoked_name)) +
primitive_hash<u4>(cast_to_u4(_invoked_type)) +
primitive_hash<u4>(cast_to_u4(_method_type)) +
primitive_hash<u4>(cast_to_u4(_instantiated_method_type));
}
#ifndef PRODUCT
@ -71,12 +72,12 @@ void LambdaProxyClassKey::print_on(outputStream* st) const {
void RunTimeLambdaProxyClassKey::print_on(outputStream* st) const {
ResourceMark rm;
st->print_cr("LambdaProxyClassKey : " INTPTR_FORMAT " hash: %0x08x", p2i(this), hash());
st->print_cr("_caller_ik : %d", _caller_ik);
st->print_cr("_instantiated_method_type : %d", _instantiated_method_type);
st->print_cr("_invoked_name : %d", _invoked_name);
st->print_cr("_invoked_type : %d", _invoked_type);
st->print_cr("_member_method : %d", _member_method);
st->print_cr("_method_type : %d", _method_type);
st->print_cr("_caller_ik : %d", cast_to_u4(_caller_ik));
st->print_cr("_instantiated_method_type : %d", cast_to_u4(_instantiated_method_type));
st->print_cr("_invoked_name : %d", cast_to_u4(_invoked_name));
st->print_cr("_invoked_type : %d", cast_to_u4(_invoked_type));
st->print_cr("_member_method : %d", cast_to_u4(_member_method));
st->print_cr("_method_type : %d", cast_to_u4(_method_type));
}
void RunTimeLambdaProxyClassInfo::print_on(outputStream* st) const {
@ -418,8 +419,7 @@ public:
(RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
runtime_info->init(key, info);
unsigned int hash = runtime_info->hash();
u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
_writer->add(hash, delta);
_writer->add(hash, AOTCompressedPointers::encode_not_null(runtime_info));
return true;
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,9 @@
#ifndef SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#define SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/metaspaceClosure.hpp"
#include "utilities/growableArray.hpp"
@ -132,19 +133,20 @@ public:
};
class RunTimeLambdaProxyClassKey {
u4 _caller_ik;
u4 _invoked_name;
u4 _invoked_type;
u4 _method_type;
u4 _member_method;
u4 _instantiated_method_type;
using narrowPtr = AOTCompressedPointers::narrowPtr;
narrowPtr _caller_ik;
narrowPtr _invoked_name;
narrowPtr _invoked_type;
narrowPtr _method_type;
narrowPtr _member_method;
narrowPtr _instantiated_method_type;
RunTimeLambdaProxyClassKey(u4 caller_ik,
u4 invoked_name,
u4 invoked_type,
u4 method_type,
u4 member_method,
u4 instantiated_method_type) :
RunTimeLambdaProxyClassKey(narrowPtr caller_ik,
narrowPtr invoked_name,
narrowPtr invoked_type,
narrowPtr method_type,
narrowPtr member_method,
narrowPtr instantiated_method_type) :
_caller_ik(caller_ik),
_invoked_name(invoked_name),
_invoked_type(invoked_type),
@ -154,15 +156,12 @@ class RunTimeLambdaProxyClassKey {
public:
static RunTimeLambdaProxyClassKey init_for_dumptime(LambdaProxyClassKey& key) {
assert(ArchiveBuilder::is_active(), "sanity");
ArchiveBuilder* b = ArchiveBuilder::current();
u4 caller_ik = b->any_to_offset_u4(key.caller_ik());
u4 invoked_name = b->any_to_offset_u4(key.invoked_name());
u4 invoked_type = b->any_to_offset_u4(key.invoked_type());
u4 method_type = b->any_to_offset_u4(key.method_type());
u4 member_method = b->any_or_null_to_offset_u4(key.member_method()); // could be null
u4 instantiated_method_type = b->any_to_offset_u4(key.instantiated_method_type());
narrowPtr caller_ik = AOTCompressedPointers::encode_not_null(key.caller_ik());
narrowPtr invoked_name = AOTCompressedPointers::encode_not_null(key.invoked_name());
narrowPtr invoked_type = AOTCompressedPointers::encode_not_null(key.invoked_type());
narrowPtr method_type = AOTCompressedPointers::encode_not_null(key.method_type());
narrowPtr member_method = AOTCompressedPointers::encode(key.member_method()); // could be null
narrowPtr instantiated_method_type = AOTCompressedPointers::encode_not_null(key.instantiated_method_type());
return RunTimeLambdaProxyClassKey(caller_ik, invoked_name, invoked_type, method_type,
member_method, instantiated_method_type);
@ -176,12 +175,12 @@ public:
Symbol* instantiated_method_type) {
// All parameters must be in shared space, or else you'd get an assert in
// ArchiveUtils::to_offset().
return RunTimeLambdaProxyClassKey(ArchiveUtils::archived_address_to_offset(caller_ik),
ArchiveUtils::archived_address_to_offset(invoked_name),
ArchiveUtils::archived_address_to_offset(invoked_type),
ArchiveUtils::archived_address_to_offset(method_type),
ArchiveUtils::archived_address_or_null_to_offset(member_method), // could be null
ArchiveUtils::archived_address_to_offset(instantiated_method_type));
return RunTimeLambdaProxyClassKey(AOTCompressedPointers::encode_address_in_cache(caller_ik),
AOTCompressedPointers::encode_address_in_cache(invoked_name),
AOTCompressedPointers::encode_address_in_cache(invoked_type),
AOTCompressedPointers::encode_address_in_cache(method_type),
AOTCompressedPointers::encode_address_in_cache_or_null(member_method), // could be null
AOTCompressedPointers::encode_address_in_cache(instantiated_method_type));
}
unsigned int hash() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,15 +22,15 @@
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/dumpTimeClassInfo.hpp"
#include "cds/runTimeClassInfo.hpp"
#include "classfile/systemDictionaryShared.hpp"
void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
ArchiveBuilder* builder = ArchiveBuilder::current();
InstanceKlass* k = info._klass;
_klass_offset = builder->any_to_offset_u4(k);
_klass = AOTCompressedPointers::encode_not_null(k);
if (!SystemDictionaryShared::is_builtin(k)) {
CrcInfo* c = crc();
@ -50,8 +50,8 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
RTVerifierConstraint* vf_constraints = verifier_constraints();
char* flags = verifier_constraint_flags();
for (i = 0; i < _num_verifier_constraints; i++) {
vf_constraints[i]._name = builder->any_to_offset_u4(info._verifier_constraints->at(i).name());
vf_constraints[i]._from_name = builder->any_or_null_to_offset_u4(info._verifier_constraints->at(i).from_name());
vf_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._verifier_constraints->at(i).name());
vf_constraints[i]._from_name = AOTCompressedPointers::encode(info._verifier_constraints->at(i).from_name());
}
for (i = 0; i < _num_verifier_constraints; i++) {
flags[i] = info._verifier_constraint_flags->at(i);
@ -61,14 +61,14 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
if (_num_loader_constraints > 0) {
RTLoaderConstraint* ld_constraints = loader_constraints();
for (i = 0; i < _num_loader_constraints; i++) {
ld_constraints[i]._name = builder->any_to_offset_u4(info._loader_constraints->at(i).name());
ld_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._loader_constraints->at(i).name());
ld_constraints[i]._loader_type1 = info._loader_constraints->at(i).loader_type1();
ld_constraints[i]._loader_type2 = info._loader_constraints->at(i).loader_type2();
}
}
if (k->is_hidden() && info.nest_host() != nullptr) {
_nest_host_offset = builder->any_to_offset_u4(info.nest_host());
_nest_host = AOTCompressedPointers::encode_not_null(info.nest_host());
}
if (k->has_archived_enum_objs()) {
int num = info.num_enum_klass_static_fields();
@ -83,11 +83,12 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
InstanceKlass* RunTimeClassInfo::klass() const {
if (AOTMetaspace::in_aot_cache(this)) {
// <this> is inside a mmaped CDS archive.
return ArchiveUtils::offset_to_archived_address<InstanceKlass*>(_klass_offset);
return AOTCompressedPointers::decode_not_null<InstanceKlass*>(_klass);
} else {
// <this> is a temporary copy of a RunTimeClassInfo that's being initialized
// by the ArchiveBuilder.
return ArchiveBuilder::current()->offset_to_buffered<InstanceKlass*>(_klass_offset);
size_t byte_offset = AOTCompressedPointers::get_byte_offset(_klass);
return ArchiveBuilder::current()->offset_to_buffered<InstanceKlass*>(byte_offset);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_CDS_RUNTIMECLASSINFO_HPP
#define SHARE_CDS_RUNTIMECLASSINFO_HPP
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
@ -41,8 +42,10 @@ class Method;
class Symbol;
class RunTimeClassInfo {
public:
enum : char {
using narrowPtr = AOTCompressedPointers::narrowPtr;
public:
enum : char {
FROM_FIELD_IS_PROTECTED = 1 << 0,
FROM_IS_ARRAY = 1 << 1,
FROM_IS_OBJECT = 1 << 2
@ -56,19 +59,19 @@ class RunTimeClassInfo {
// This is different than DumpTimeClassInfo::DTVerifierConstraint. We use
// u4 instead of Symbol* to save space on 64-bit CPU.
struct RTVerifierConstraint {
u4 _name;
u4 _from_name;
Symbol* name() { return ArchiveUtils::offset_to_archived_address<Symbol*>(_name); }
narrowPtr _name;
narrowPtr _from_name;
Symbol* name() { return AOTCompressedPointers::decode_not_null<Symbol*>(_name); }
Symbol* from_name() {
return (_from_name == 0) ? nullptr : ArchiveUtils::offset_to_archived_address<Symbol*>(_from_name);
return AOTCompressedPointers::decode<Symbol*>(_from_name);
}
};
struct RTLoaderConstraint {
u4 _name;
narrowPtr _name;
char _loader_type1;
char _loader_type2;
Symbol* constraint_name() { return ArchiveUtils::offset_to_archived_address<Symbol*>(_name); }
Symbol* constraint_name() { return AOTCompressedPointers::decode_not_null<Symbol*>(_name); }
};
struct RTEnumKlassStaticFields {
int _num;
@ -76,8 +79,8 @@ class RunTimeClassInfo {
};
private:
u4 _klass_offset;
u4 _nest_host_offset;
narrowPtr _klass;
narrowPtr _nest_host;
int _num_verifier_constraints;
int _num_loader_constraints;
@ -185,7 +188,7 @@ public:
InstanceKlass* nest_host() {
assert(!ArchiveBuilder::is_active(), "not called when dumping archive");
return ArchiveUtils::offset_to_archived_address_or_null<InstanceKlass*>(_nest_host_offset);
return AOTCompressedPointers::decode<InstanceKlass*>(_nest_host); // may be null
}
RTLoaderConstraint* loader_constraints() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1017,7 +1017,8 @@ public:
};
static int skip_annotation_value(const u1*, int, int); // fwd decl
static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth); // fwd decl
static const int max_recursion_depth = 5;
// Safely increment index by val if does not pass limit
#define SAFE_ADD(index, limit, val) \
@ -1025,23 +1026,29 @@ if (index >= limit - val) return limit; \
index += val;
// Skip an annotation. Return >=limit if there is any problem.
static int skip_annotation(const u1* buffer, int limit, int index) {
static int skip_annotation(const u1* buffer, int limit, int index, int recursion_depth = 0) {
assert(buffer != nullptr, "invariant");
if (recursion_depth > max_recursion_depth) {
return limit;
}
// annotation := atype:u2 do(nmem:u2) {member:u2 value}
// value := switch (tag:u1) { ... }
SAFE_ADD(index, limit, 4); // skip atype and read nmem
int nmem = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nmem >= 0 && index < limit) {
SAFE_ADD(index, limit, 2); // skip member
index = skip_annotation_value(buffer, limit, index);
index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
return index;
}
// Skip an annotation value. Return >=limit if there is any problem.
static int skip_annotation_value(const u1* buffer, int limit, int index) {
static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth) {
assert(buffer != nullptr, "invariant");
if (recursion_depth > max_recursion_depth) {
return limit;
}
// value := switch (tag:u1) {
// case B, C, I, S, Z, D, F, J, c: con:u2;
// case e: e_class:u2 e_name:u2;
@ -1073,12 +1080,12 @@ static int skip_annotation_value(const u1* buffer, int limit, int index) {
SAFE_ADD(index, limit, 2); // read nval
int nval = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nval >= 0 && index < limit) {
index = skip_annotation_value(buffer, limit, index);
index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
}
break;
case '@':
index = skip_annotation(buffer, limit, index);
index = skip_annotation(buffer, limit, index, recursion_depth + 1);
break;
default:
return limit; // bad tag byte

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
#define SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
#include "cds/aotCompressedPointers.hpp"
#include "cds/cds_globals.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
@ -123,6 +124,9 @@ public:
~CompactHashtableWriter();
void add(unsigned int hash, u4 encoded_value);
void add(unsigned int hash, AOTCompressedPointers::narrowPtr encoded_value) {
add(hash, cast_to_u4(encoded_value));
}
void dump(SimpleCompactHashtable *cht, const char* table_name);
private:
@ -371,11 +375,11 @@ public:
//
// OffsetCompactHashtable -- This is used to store many types of objects
// in the CDS archive. On 64-bit platforms, we save space by using a 32-bit
// offset from the CDS base address.
// narrowPtr from the CDS base address.
template <typename V>
inline V read_value_from_compact_hashtable(address base_address, u4 offset) {
return (V)(base_address + offset);
inline V read_value_from_compact_hashtable(address base_address, u4 narrowp) {
return AOTCompressedPointers::decode_not_null<V>(cast_from_u4(narrowp), base_address);
}
template <

View File

@ -22,6 +22,7 @@
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
@ -690,7 +691,7 @@ void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
"must not rehash during dumping");
sym->set_permanent();
writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
writer->add(fixed_hash, AOTCompressedPointers::encode_not_null(sym));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "cds/aotClassFilter.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@ -1282,11 +1283,10 @@ unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) {
class CopySharedClassInfoToArchive : StackObj {
CompactHashtableWriter* _writer;
bool _is_builtin;
ArchiveBuilder *_builder;
public:
CopySharedClassInfoToArchive(CompactHashtableWriter* writer,
bool is_builtin)
: _writer(writer), _is_builtin(is_builtin), _builder(ArchiveBuilder::current()) {}
: _writer(writer), _is_builtin(is_builtin) {}
void do_entry(InstanceKlass* k, DumpTimeClassInfo& info) {
if (!info.is_excluded() && info.is_builtin() == _is_builtin) {
@ -1299,11 +1299,10 @@ public:
Symbol* name = info._klass->name();
name = ArchiveBuilder::current()->get_buffered_addr(name);
hash = SystemDictionaryShared::hash_for_shared_dictionary((address)name);
u4 delta = _builder->buffer_to_offset_u4((address)record);
if (_is_builtin && info._klass->is_hidden()) {
// skip
} else {
_writer->add(hash, delta);
_writer->add(hash, AOTCompressedPointers::encode_not_null(record));
}
if (log_is_enabled(Trace, aot, hashtables)) {
ResourceMark rm;

View File

@ -399,7 +399,7 @@ AOTCodeCache::~AOTCodeCache() {
}
}
void AOTCodeCache::Config::record() {
void AOTCodeCache::Config::record(uint cpu_features_offset) {
_flags = 0;
#ifdef ASSERT
_flags |= debugVM;
@ -430,9 +430,50 @@ void AOTCodeCache::Config::record() {
_compressedKlassShift = CompressedKlassPointers::shift();
_contendedPaddingWidth = ContendedPaddingWidth;
_gc = (uint)Universe::heap()->kind();
_cpu_features_offset = cpu_features_offset;
}
bool AOTCodeCache::Config::verify() const {
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
LogStreamHandle(Debug, aot, codecache, init) log;
uint offset = _cpu_features_offset;
uint cpu_features_size = *(uint *)cache->addr(offset);
assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
offset += sizeof(uint);
void* cached_cpu_features_buffer = (void *)cache->addr(offset);
if (log.is_enabled()) {
ResourceMark rm; // required for stringStream::as_string()
stringStream ss;
VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
}
if (VM_Version::supports_features(cached_cpu_features_buffer)) {
if (log.is_enabled()) {
ResourceMark rm; // required for stringStream::as_string()
stringStream ss;
char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
VM_Version::store_cpu_features(runtime_cpu_features);
VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
if (!ss.is_empty()) {
log.print_cr("Additional runtime CPU features: %s", ss.as_string());
}
}
} else {
if (log.is_enabled()) {
ResourceMark rm; // required for stringStream::as_string()
stringStream ss;
char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
VM_Version::store_cpu_features(runtime_cpu_features);
VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
}
return false;
}
return true;
}
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
// First checks affect all cached AOT code
#ifdef ASSERT
if ((_flags & debugVM) == 0) {
@ -478,6 +519,9 @@ bool AOTCodeCache::Config::verify() const {
AOTStubCaching = false;
}
if (!verify_cpu_features(cache)) {
return false;
}
return true;
}
@ -679,6 +723,17 @@ extern "C" {
}
}
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
uint* size_ptr = (uint *)buffer;
*size_ptr = buffer_size;
buffer += sizeof(uint);
VM_Version::store_cpu_features(buffer);
log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
buffer += buffer_size;
buffer = align_up(buffer, DATA_ALIGNMENT);
}
bool AOTCodeCache::finish_write() {
if (!align_write()) {
return false;
@ -698,23 +753,32 @@ bool AOTCodeCache::finish_write() {
uint store_count = _store_entries_cnt;
if (store_count > 0) {
uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
uint code_count = store_count;
uint search_count = code_count * 2;
uint search_size = search_count * sizeof(uint);
uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
// _write_position includes size of code and strings
uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
uint cpu_features_size = VM_Version::cpu_features_size();
uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
align_up(total_cpu_features_size, DATA_ALIGNMENT);
assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
// Create ordered search table for entries [id, index];
uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
// Allocate in AOT Cache buffer
char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
char* start = align_up(buffer, DATA_ALIGNMENT);
char* current = start + header_size; // Skip header
uint cpu_features_offset = current - start;
store_cpu_features(current, cpu_features_size);
assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
assert(current < start + total_size, "sanity check");
// Create ordered search table for entries [id, index];
uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
uint adapters_count = 0;
uint shared_blobs_count = 0;
@ -790,7 +854,7 @@ bool AOTCodeCache::finish_write() {
header->init(size, (uint)strings_count, strings_offset,
entries_count, new_entries_offset,
adapters_count, shared_blobs_count,
C1_blobs_count, C2_blobs_count);
C1_blobs_count, C2_blobs_count, cpu_features_offset);
log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,10 +185,12 @@ protected:
restrictContendedPadding = 128
};
uint _flags;
uint _cpu_features_offset; // offset in the cache where cpu features are stored
public:
void record();
bool verify() const;
void record(uint cpu_features_offset);
bool verify_cpu_features(AOTCodeCache* cache) const;
bool verify(AOTCodeCache* cache) const;
};
class Header : public CHeapObj<mtCode> {
@ -206,14 +208,15 @@ protected:
uint _shared_blobs_count;
uint _C1_blobs_count;
uint _C2_blobs_count;
Config _config;
Config _config; // must be the last element as there is trailing data stored immediately after Config
public:
void init(uint cache_size,
uint strings_count, uint strings_offset,
uint entries_count, uint entries_offset,
uint adapters_count, uint shared_blobs_count,
uint C1_blobs_count, uint C2_blobs_count) {
uint C1_blobs_count, uint C2_blobs_count,
uint cpu_features_offset) {
_version = AOT_CODE_VERSION;
_cache_size = cache_size;
_strings_count = strings_count;
@ -224,7 +227,7 @@ protected:
_shared_blobs_count = shared_blobs_count;
_C1_blobs_count = C1_blobs_count;
_C2_blobs_count = C2_blobs_count;
_config.record();
_config.record(cpu_features_offset);
}
@ -239,8 +242,8 @@ protected:
uint C2_blobs_count() const { return _C2_blobs_count; }
bool verify(uint load_size) const;
bool verify_config() const { // Called after Universe initialized
return _config.verify();
bool verify_config(AOTCodeCache* cache) const { // Called after Universe initialized
return _config.verify(cache);
}
};
@ -320,6 +323,8 @@ public:
AOTCodeEntry* find_entry(AOTCodeEntry::Kind kind, uint id);
void store_cpu_features(char*& buffer, uint buffer_size);
bool finish_write();
bool write_relocations(CodeBlob& code_blob);
@ -361,7 +366,7 @@ private:
static bool open_cache(bool is_dumping, bool is_using);
bool verify_config() {
if (for_use()) {
return _load_header->verify_config();
return _load_header->verify_config(this);
}
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -336,6 +336,7 @@ RuntimeBlob::RuntimeBlob(
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->purge();
{

View File

@ -1139,7 +1139,7 @@ size_t CodeCache::freelists_length() {
void icache_init();
void CodeCache::initialize() {
assert(CodeCacheSegmentSize >= (size_t)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
assert(CodeCacheSegmentSize >= CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
assert(CodeCacheSegmentSize >= (size_t)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -406,7 +406,7 @@ void CompilerConfig::set_compilation_policy_flags() {
if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
#ifdef COMPILER2
// Some inlining tuning
#if defined(X86) || defined(AARCH64) || defined(RISCV64)
#if defined(X86) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
}

View File

@ -24,10 +24,9 @@
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
size_t G1BlockOffsetTable::compute_size(size_t mem_region_words) {
@ -52,6 +51,12 @@ void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, uint8_t offset)
addr->store_relaxed(offset);
}
static void check_offset(size_t offset, const char* msg) {
assert(offset < CardTable::card_size_in_words(),
"%s - offset: %zu, N_words: %u",
msg, offset, CardTable::card_size_in_words());
}
void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, HeapWord* high, HeapWord* low) {
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);

View File

@ -37,19 +37,12 @@
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
class G1BlockOffsetTable : public CHeapObj<mtGC> {
private:
// The reserved region covered by the table.
MemRegion _reserved;
// Biased array-start of BOT array for fast BOT entry translation
Atomic<uint8_t>* _offset_base;
void check_offset(size_t offset, const char* msg) const {
assert(offset < CardTable::card_size_in_words(),
"%s - offset: %zu, N_words: %u",
msg, offset, CardTable::card_size_in_words());
}
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
inline uint8_t offset_array(Atomic<uint8_t>* addr) const;
@ -85,7 +78,6 @@ private:
}
public:
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words);
@ -99,22 +91,14 @@ public:
// in the heap parameter.
G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);
static bool is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end) {
HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
// strictly greater-than
return obj_end > cur_card_boundary;
}
inline static bool is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end);
// Returns the address of the start of the block reaching into the card containing
// "addr".
inline HeapWord* block_start_reaching_into_card(const void* addr) const;
void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
if (is_crossing_card_boundary(blk_start, blk_end)) {
update_for_block_work(blk_start, blk_end);
}
}
inline void update_for_block(HeapWord* blk_start, HeapWord* blk_end);
};
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP

View File

@ -27,10 +27,7 @@
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
assert(_reserved.contains(addr), "invalid address");
@ -70,4 +67,17 @@ inline HeapWord* G1BlockOffsetTable::addr_for_entry(const Atomic<uint8_t>* const
return result;
}
inline bool G1BlockOffsetTable::is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end) {
HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
// strictly greater-than
return obj_end > cur_card_boundary;
}
inline void G1BlockOffsetTable::update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
if (is_crossing_card_boundary(blk_start, blk_end)) {
update_for_block_work(blk_start, blk_end);
}
}
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

View File

@ -24,6 +24,7 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "cppstdlib/new.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
@ -519,8 +520,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_max_concurrent_workers(0),
_region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_num_regions(), mtGC)),
_top_at_mark_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
_top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
_top_at_mark_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
_top_at_rebuild_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
_needs_remembered_set_rebuild(false)
{
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
@ -564,6 +565,12 @@ void G1ConcurrentMark::fully_initialize() {
_tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats);
}
for (uint i = 0; i < _g1h->max_num_regions(); i++) {
::new (&_region_mark_stats[i]) G1RegionMarkStats{};
::new (&_top_at_mark_starts[i]) Atomic<HeapWord*>{};
::new (&_top_at_rebuild_starts[i]) Atomic<HeapWord*>{};
}
reset_at_marking_complete();
}
@ -576,7 +583,7 @@ PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const
}
void G1ConcurrentMark::reset() {
_has_aborted = false;
_has_aborted.store_relaxed(false);
reset_marking_for_restart();
@ -588,7 +595,7 @@ void G1ConcurrentMark::reset() {
uint max_num_regions = _g1h->max_num_regions();
for (uint i = 0; i < max_num_regions; i++) {
_top_at_rebuild_starts[i] = nullptr;
_top_at_rebuild_starts[i].store_relaxed(nullptr);
_region_mark_stats[i].clear();
}
@ -600,7 +607,7 @@ void G1ConcurrentMark::clear_statistics(G1HeapRegion* r) {
for (uint j = 0; j < _max_num_tasks; ++j) {
_tasks[j]->clear_mark_stats_cache(region_idx);
}
_top_at_rebuild_starts[region_idx] = nullptr;
_top_at_rebuild_starts[region_idx].store_relaxed(nullptr);
_region_mark_stats[region_idx].clear();
}
@ -636,7 +643,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
}
clear_has_overflown();
_finger = _heap.start();
_finger.store_relaxed(_heap.start());
for (uint i = 0; i < _max_num_tasks; ++i) {
_tasks[i]->reset_for_restart();
@ -657,14 +664,14 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) {
void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
set_concurrency(active_tasks);
_concurrent = concurrent;
_concurrent.store_relaxed(concurrent);
if (!concurrent) {
// At this point we should be in a STW phase, and completed marking.
assert_at_safepoint_on_vm_thread();
assert(out_of_regions(),
"only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
p2i(_finger), p2i(_heap.end()));
p2i(finger()), p2i(_heap.end()));
}
}
@ -695,8 +702,8 @@ void G1ConcurrentMark::reset_at_marking_complete() {
}
G1ConcurrentMark::~G1ConcurrentMark() {
FREE_C_HEAP_ARRAY(HeapWord*, _top_at_mark_starts);
FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_mark_starts);
FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_rebuild_starts);
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
// The G1ConcurrentMark instance is never freed.
ShouldNotReachHere();
@ -921,6 +928,8 @@ public:
bool do_heap_region(G1HeapRegion* r) override {
if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
_cm->update_top_at_mark_start(r);
} else {
_cm->reset_top_at_mark_start(r);
}
return false;
}
@ -1163,7 +1172,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
}
uint G1ConcurrentMark::completed_mark_cycles() const {
return AtomicAccess::load(&_completed_mark_cycles);
return _completed_mark_cycles.load_relaxed();
}
void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
@ -1172,7 +1181,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (mark_cycle_completed) {
AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed);
_completed_mark_cycles.add_then_fetch(1u, memory_order_relaxed);
}
if (has_aborted()) {
@ -1186,7 +1195,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
}
void G1ConcurrentMark::mark_from_roots() {
_restart_for_overflow = false;
_restart_for_overflow.store_relaxed(false);
uint active_workers = calc_active_marking_workers();
@ -1355,7 +1364,7 @@ void G1ConcurrentMark::remark() {
}
} else {
// We overflowed. Restart concurrent marking.
_restart_for_overflow = true;
_restart_for_overflow.store_relaxed(true);
verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkOverflow);
@ -1784,44 +1793,45 @@ void G1ConcurrentMark::clear_bitmap_for_region(G1HeapRegion* hr) {
}
G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
// "checkpoint" the finger
HeapWord* finger = _finger;
// "Checkpoint" the finger.
HeapWord* local_finger = finger();
while (finger < _heap.end()) {
assert(_g1h->is_in_reserved(finger), "invariant");
while (local_finger < _heap.end()) {
assert(_g1h->is_in_reserved(local_finger), "invariant");
G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(local_finger);
// Make sure that the reads below do not float before loading curr_region.
OrderAccess::loadload();
// Above heap_region_containing may return null as we always scan claim
// until the end of the heap. In this case, just jump to the next region.
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
HeapWord* end = curr_region != nullptr ? curr_region->end() : local_finger + G1HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end);
if (res == finger && curr_region != nullptr) {
// we succeeded
HeapWord* res = _finger.compare_exchange(local_finger, end);
if (res == local_finger && curr_region != nullptr) {
// We succeeded.
HeapWord* bottom = curr_region->bottom();
HeapWord* limit = top_at_mark_start(curr_region);
log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(top_at_mark_start(curr_region)));
// notice that _finger == end cannot be guaranteed here since,
// someone else might have moved the finger even further
assert(_finger >= end, "the finger should have moved forward");
// Notice that _finger == end cannot be guaranteed here since,
// someone else might have moved the finger even further.
assert(finger() >= end, "The finger should have moved forward");
if (limit > bottom) {
return curr_region;
} else {
assert(limit == bottom,
"the region limit should be at bottom");
"The region limit should be at bottom");
// We return null and the caller should try calling
// claim_region() again.
return nullptr;
}
} else {
assert(_finger > finger, "the finger should have moved forward");
// read it again
finger = _finger;
// Read the finger again.
HeapWord* next_finger = finger();
assert(next_finger > local_finger, "The finger should have moved forward " PTR_FORMAT " " PTR_FORMAT, p2i(local_finger), p2i(next_finger));
local_finger = next_finger;
}
}
@ -1957,7 +1967,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
void G1ConcurrentMark::abort_marking_threads() {
assert(!_root_regions.scan_in_progress(), "still doing root region scan");
_has_aborted = true;
_has_aborted.store_relaxed(true);
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
}

View File

@ -368,7 +368,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// For grey objects
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
HeapWord* volatile _finger; // The global finger, region aligned,
Atomic<HeapWord*> _finger; // The global finger, region aligned,
// always pointing to the end of the
// last claimed region
@ -395,19 +395,19 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
WorkerThreadsBarrierSync _second_overflow_barrier_sync;
// Number of completed mark cycles.
volatile uint _completed_mark_cycles;
Atomic<uint> _completed_mark_cycles;
// This is set by any task, when an overflow on the global data
// structures is detected
volatile bool _has_overflown;
Atomic<bool> _has_overflown;
// True: marking is concurrent, false: we're in remark
volatile bool _concurrent;
Atomic<bool> _concurrent;
// Set at the end of a Full GC so that marking aborts
volatile bool _has_aborted;
Atomic<bool> _has_aborted;
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
volatile bool _restart_for_overflow;
Atomic<bool> _restart_for_overflow;
ConcurrentGCTimer* _gc_timer_cm;
@ -461,8 +461,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
void print_and_reset_taskqueue_stats();
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
HeapWord* finger() { return _finger.load_relaxed(); }
bool concurrent() { return _concurrent.load_relaxed(); }
uint active_tasks() { return _num_active_tasks; }
TaskTerminator* terminator() { return &_terminator; }
@ -487,7 +487,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// to satisfy an allocation without doing a GC. This is fine, because all
// objects in those regions will be considered live anyway because of
// SATB guarantees (i.e. their TAMS will be equal to bottom).
bool out_of_regions() { return _finger >= _heap.end(); }
bool out_of_regions() { return finger() >= _heap.end(); }
// Returns the task with the given id
G1CMTask* task(uint id) {
@ -499,10 +499,10 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// Access / manipulation of the overflow flag which is set to
// indicate that the global stack has overflown
bool has_overflown() { return _has_overflown; }
void set_has_overflown() { _has_overflown = true; }
void clear_has_overflown() { _has_overflown = false; }
bool restart_for_overflow() { return _restart_for_overflow; }
bool has_overflown() { return _has_overflown.load_relaxed(); }
void set_has_overflown() { _has_overflown.store_relaxed(true); }
void clear_has_overflown() { _has_overflown.store_relaxed(false); }
bool restart_for_overflow() { return _restart_for_overflow.load_relaxed(); }
// Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(uint worker_id);
@ -516,12 +516,12 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
G1RegionMarkStats* _region_mark_stats;
// Top pointer for each region at the start of marking. Must be valid for all committed
// regions.
HeapWord* volatile* _top_at_mark_starts;
Atomic<HeapWord*>* _top_at_mark_starts;
// Top pointer for each region at the start of the rebuild remembered set process
// for regions which remembered sets need to be rebuilt. A null for a given region
// means that this region does not be scanned during the rebuilding remembered
// set phase at all.
HeapWord* volatile* _top_at_rebuild_starts;
Atomic<HeapWord*>* _top_at_rebuild_starts;
// True when Remark pause selected regions for rebuilding.
bool _needs_remembered_set_rebuild;
public:
@ -679,7 +679,7 @@ public:
uint completed_mark_cycles() const;
bool has_aborted() { return _has_aborted; }
bool has_aborted() { return _has_aborted.load_relaxed(); }
void print_summary_info();

View File

@ -194,11 +194,11 @@ inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
_top_at_mark_starts[region] = r->top();
_top_at_mark_starts[region].store_relaxed(r->top());
}
inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
_top_at_mark_starts[r->hrm_index()] = r->bottom();
_top_at_mark_starts[r->hrm_index()].store_relaxed(r->bottom());
}
inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
@ -207,7 +207,7 @@ inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) cons
inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
return _top_at_mark_starts[region];
return _top_at_mark_starts[region].load_relaxed();
}
inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
@ -217,7 +217,7 @@ inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
}
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
return _top_at_rebuild_starts[r->hrm_index()];
return _top_at_rebuild_starts[r->hrm_index()].load_relaxed();
}
inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
@ -225,10 +225,10 @@ inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
assert(_top_at_rebuild_starts[region] == nullptr,
assert(top_at_rebuild_start(r) == nullptr,
"TARS for region %u has already been set to " PTR_FORMAT " should be null",
region, p2i(_top_at_rebuild_starts[region]));
_top_at_rebuild_starts[region] = r->top();
region, p2i(top_at_rebuild_start(r)));
_top_at_rebuild_starts[region].store_relaxed(r->top());
}
inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {

View File

@ -44,6 +44,8 @@ struct G1RegionMarkStats {
Atomic<size_t> _live_words;
Atomic<size_t> _incoming_refs;
G1RegionMarkStats() : _live_words(0), _incoming_refs(0) { }
// Clear all members.
void clear() {
_live_words.store_relaxed(0);

View File

@ -497,10 +497,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1ConcurrentMark* cm = g1h->concurrent_mark();
HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());
// Concurrent mark does not mark through regions that we retain (they are root
// regions wrt to marking), so we must clear their mark data (tams, bitmap, ...)
// set eagerly or during evacuation failure.

View File

@ -30,8 +30,11 @@
#include "gc/parallel/psScavenge.hpp"
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
return size < eden_size / 2;
const size_t max_young_gen_bytes = young_gen()->max_gen_size();
const size_t survivor_size_bytes = young_gen()->from_space()->capacity_in_bytes();
const size_t max_eden_size_bytes = max_young_gen_bytes - survivor_size_bytes * 2;
const size_t max_eden_size_words = max_eden_size_bytes / HeapWordSize;
return size < max_eden_size_words / 2;
}
inline bool ParallelScavengeHeap::is_in_young(const void* p) const {

View File

@ -78,12 +78,13 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
}
char* const base_addr = committed_high_addr() - bytes;
bool result = special() || os::uncommit_memory(base_addr, bytes);
if (result) {
_committed_high_addr -= bytes;
if (!special()) {
os::uncommit_memory(base_addr, bytes);
}
return result;
_committed_high_addr -= bytes;
return true;
}
#ifndef PRODUCT

View File

@ -169,9 +169,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
// Shrink.
MemRegion delta = MemRegion(new_committed.end(),
old_committed.word_size() - new_committed.word_size());
bool res = os::uncommit_memory((char*)delta.start(),
delta.byte_size());
assert(res, "uncommit should succeed");
os::uncommit_memory((char*)delta.start(), delta.byte_size());
}
log_trace(gc, barrier)("CardTable::resize_covered_region: ");

View File

@ -459,13 +459,16 @@ size_t ThreadLocalAllocBuffer::end_reserve() {
}
size_t ThreadLocalAllocBuffer::estimated_used_bytes() const {
// Data races due to unsynchronized access like the following reads to _start
// and _top are undefined behavior. Atomic<T> would not provide any additional
// guarantees, so use AtomicAccess directly.
HeapWord* start = AtomicAccess::load(&_start);
HeapWord* top = AtomicAccess::load(&_top);
// There has been a race when retrieving _top and _start. Return 0.
if (_top < _start) {
// If there has been a race when retrieving _top and _start, return 0.
if (top < start) {
return 0;
}
size_t used_bytes = pointer_delta(_top, _start, 1);
size_t used_bytes = pointer_delta(top, start, 1);
// Comparing diff with the maximum allowed size will ensure that we don't add
// the used bytes from a semi-initialized TLAB ending up with implausible values.
// In this case also just return 0.

View File

@ -32,8 +32,10 @@
class ThreadLocalAllocStats;
// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation. It is thread-private at any time.
// ThreadLocalAllocBuffer is a descriptor for thread-local storage used by
// mutator threads for local/private allocation. As a TLAB is thread-private,
// there is no concurrent/parallel access to its memory or its members,
// other than by estimated_used_bytes().
//
// Heap sampling is performed via the end and allocation_end
// fields.
@ -123,7 +125,7 @@ public:
// Due to races with concurrent allocations and/or resetting the TLAB the return
// value may be inconsistent with any other metrics (e.g. total allocated
// bytes), and may just incorrectly return 0.
// Intented fo external inspection only where accuracy is not 100% required.
// Intended for external inspection only where accuracy is not 100% required.
size_t estimated_used_bytes() const;
// Allocate size HeapWords. The memory is NOT initialized to zero.

View File

@ -157,7 +157,7 @@
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(HeapWord*) \
declare_toplevel_type(HeapWord* volatile) \
declare_toplevel_type(Atomic<HeapWord*>) \
declare_toplevel_type(MemRegion*) \
declare_toplevel_type(ThreadLocalAllocBuffer*) \
\

View File

@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahTrace.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
@ -403,17 +404,9 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
ShenandoahMarkingContext* const ctx = heap->marking_context();
const size_t old_garbage_threshold =
(ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100;
size_t promo_potential = 0;
size_t candidates = 0;
// Tracks the padding of space above top in regions eligible for promotion in place
size_t promote_in_place_pad = 0;
// Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
// less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
// have more live data.
@ -422,20 +415,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
ResourceMark rm;
AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);
ShenandoahFreeSet* freeset = heap->free_set();
// Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
idx_t pip_low_collector_idx = freeset->max_regions();
idx_t pip_high_collector_idx = -1;
idx_t pip_low_mutator_idx = freeset->max_regions();
idx_t pip_high_mutator_idx = -1;
size_t collector_regions_to_pip = 0;
size_t mutator_regions_to_pip = 0;
size_t pip_mutator_regions = 0;
size_t pip_collector_regions = 0;
size_t pip_mutator_bytes = 0;
size_t pip_collector_bytes = 0;
ShenandoahInPlacePromotionPlanner in_place_promotions(heap);
for (idx_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* const r = heap->get_region(i);
@ -444,77 +424,19 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
continue;
}
if (heap->is_tenurable(r)) {
if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) {
if (in_place_promotions.is_eligible(r)) {
// We prefer to promote this region in place because it has a small amount of garbage and a large usage.
HeapWord* tams = ctx->top_at_mark_start(r);
HeapWord* original_top = r->top();
if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
// No allocations from this region have been made during concurrent mark. It meets all the criteria
// for in-place-promotion. Though we only need the value of top when we fill the end of the region,
// we use this field to indicate that this region should be promoted in place during the evacuation
// phase.
r->save_top_before_promote();
size_t remnant_bytes = r->free();
size_t remnant_words = remnant_bytes / HeapWordSize;
assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
if (remnant_words >= ShenandoahHeap::min_fill_size()) {
ShenandoahHeap::fill_with_object(original_top, remnant_words);
// Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
// newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
// new allocations would not necessarily be eligible for promotion. This addresses both issues.
r->set_top(r->end());
// The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
// Otherwise, the region is in the NotFree partition.
ShenandoahFreeSetPartitionId p = free_set->membership(i);
if (p == ShenandoahFreeSetPartitionId::Mutator) {
mutator_regions_to_pip++;
if (i < pip_low_mutator_idx) {
pip_low_mutator_idx = i;
}
if (i > pip_high_mutator_idx) {
pip_high_mutator_idx = i;
}
pip_mutator_regions++;
pip_mutator_bytes += remnant_bytes;
} else if (p == ShenandoahFreeSetPartitionId::Collector) {
collector_regions_to_pip++;
if (i < pip_low_collector_idx) {
pip_low_collector_idx = i;
}
if (i > pip_high_collector_idx) {
pip_high_collector_idx = i;
}
pip_collector_regions++;
pip_collector_bytes += remnant_bytes;
} else {
assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()),
"Should be NotFree if not in Collector or Mutator partitions");
// In this case, the memory is already counted as used and the region has already been retired. There is
// no need for further adjustments to used. Further, the remnant memory for this region will not be
// unallocated or made available to OldCollector after pip.
remnant_bytes = 0;
}
promote_in_place_pad += remnant_bytes;
free_set->prepare_to_promote_in_place(i, remnant_bytes);
} else {
// Since the remnant is so small that this region has already been retired, we don't have to worry about any
// accidental allocations occurring within this region before the region is promoted in place.
// This region was already not in the Collector or Mutator set, so no need to remove it.
assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
}
}
// Else, we do not promote this region (either in place or by copy) because it has received new allocations.
// During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
// used > pip_used_threshold, and get_top_before_promote() != tams
// Note that if this region has been used recently for allocation, it will not be promoted and it will
// not be selected for promotion by evacuation.
in_place_promotions.prepare(r);
} else {
// Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
// we may still decide to exclude this promotion-eligible region from the current collection set. If this
// happens, we will consider this region as part of the anticipated promotion potential for the next GC
// pass; see further below.
sorted_regions[candidates]._region = r;
sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
sorted_regions[candidates]._live_data = r->get_live_data_bytes();
candidates++;
}
} else {
// We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
@ -533,7 +455,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
// in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
// us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) {
if (r->garbage() >= old_garbage_threshold) {
if (r->garbage() >= in_place_promotions.old_garbage_threshold()) {
promo_potential += r->get_live_data_bytes();
}
}
@ -542,21 +464,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
// Subsequent regions may be selected if they have smaller live data.
}
if (pip_mutator_regions + pip_collector_regions > 0) {
freeset->account_for_pip_regions(pip_mutator_regions, pip_mutator_bytes, pip_collector_regions, pip_collector_bytes);
}
// Retire any regions that have been selected for promote in place
if (collector_regions_to_pip > 0) {
freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
pip_low_collector_idx, pip_high_collector_idx,
collector_regions_to_pip);
}
if (mutator_regions_to_pip > 0) {
freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator,
pip_low_mutator_idx, pip_high_mutator_idx,
mutator_regions_to_pip);
}
in_place_promotions.update_free_set();
// Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
// that qualify to be promoted by evacuation.
@ -589,8 +497,6 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
}
log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));
heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
heap->old_generation()->set_promotion_potential(promo_potential);
return old_consumed;
}

View File

@ -128,7 +128,7 @@ private:
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
// be present within an old-generation region before that region is considered a good candidate for inclusion in
// the collection set under normal circumstances. For our purposes, normal circustances are when the memory consumed
// the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
// by the old generation is less than 50% of the soft heap capacity. When the old generation grows beyond the 50%
// threshold, we dynamically adjust the old garbage threshold, allowing us to invest in packing the old generation
// more tightly so that more memory can be made available to the more frequent young GC cycles. This variable

View File

@ -24,14 +24,11 @@
*/
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
@ -77,10 +74,10 @@ void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
void ShenandoahGenerationalEvacuationTask::do_work() {
if (_only_promote_regions) {
// No allocations will be made, do not enter oom-during-evac protocol.
assert(ShenandoahHeap::heap()->collection_set()->is_empty(), "Should not have a collection set here");
assert(_heap->collection_set()->is_empty(), "Should not have a collection set here");
promote_regions();
} else {
assert(!ShenandoahHeap::heap()->collection_set()->is_empty(), "Should have a collection set here");
assert(!_heap->collection_set()->is_empty(), "Should have a collection set here");
ShenandoahEvacOOMScope oom_evac_scope;
evacuate_and_promote_regions();
}
@ -95,16 +92,16 @@ void log_region(const ShenandoahHeapRegion* r, LogStream* ls) {
}
void ShenandoahGenerationalEvacuationTask::promote_regions() {
ShenandoahHeapRegion* r;
LogTarget(Debug, gc) lt;
ShenandoahInPlacePromoter promoter(_heap);
ShenandoahHeapRegion* r;
while ((r = _regions->next()) != nullptr) {
if (lt.is_enabled()) {
LogStream ls(lt);
log_region(r, &ls);
}
maybe_promote_region(r);
promoter.maybe_promote_region(r);
if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
break;
@ -115,6 +112,7 @@ void ShenandoahGenerationalEvacuationTask::promote_regions() {
void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
LogTarget(Debug, gc) lt;
ShenandoahConcurrentEvacuator cl(_heap);
ShenandoahInPlacePromoter promoter(_heap);
ShenandoahHeapRegion* r;
while ((r = _regions->next()) != nullptr) {
@ -127,7 +125,7 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
_heap->marked_object_iterate(r, &cl);
} else {
maybe_promote_region(r);
promoter.maybe_promote_region(r);
}
if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
@ -135,182 +133,3 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
}
}
}
void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
if (r->is_young() && r->is_active() && _heap->is_tenurable(r)) {
if (r->is_humongous_start()) {
// We promote humongous_start regions along with their affiliated continuations during evacuation rather than
// doing this work during a safepoint. We cannot put humongous regions into the collection set because that
// triggers the load-reference barrier (LRB) to copy on reference fetch.
//
// Aged humongous continuation regions are handled with their start region. If an aged regular region has
// more garbage than the old garbage threshold, we'll promote by evacuation. If there is room for evacuation
// in this cycle, the region will be in the collection set. If there is not room, the region will be promoted
// by evacuation in some future GC cycle.
// We do not promote primitive arrays because there's no performance penalty keeping them in young. When/if they
// become garbage, reclaiming the memory from young is much quicker and more efficient than reclaiming them from old.
oop obj = cast_to_oop(r->bottom());
if (!obj->is_typeArray()) {
promote_humongous(r);
}
} else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
// Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
// the LRB to copy on reference fetch.
//
// If an aged regular region has received allocations during the current cycle, we do not promote because the
// newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
promote_in_place(r);
}
}
}
// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content. The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
assert(!_generation->is_old(), "Sanity check");
ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
HeapWord* const tams = marking_context->top_at_mark_start(region);
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
{
const size_t old_garbage_threshold =
(region_size_bytes * _heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
assert(region->garbage_before_padded_for_promote() < old_garbage_threshold,
"Region %zu has too much garbage for promotion", region->index());
assert(region->is_young(), "Only young regions can be promoted");
assert(region->is_regular(), "Use different service to promote humongous regions");
assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
}
ShenandoahOldGeneration* const old_gen = _heap->old_generation();
ShenandoahYoungGeneration* const young_gen = _heap->young_generation();
// Rebuild the remembered set information and mark the entire range as DIRTY. We do NOT scan the content of this
// range to determine which cards need to be DIRTY. That would force us to scan the region twice, once now, and
// once during the subsequent remembered set scan. Instead, we blindly (conservatively) mark everything as DIRTY
// now and then sort out the CLEAN pages during the next remembered set scan.
//
// Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
// then registering every live object and every coalesced range of free objects in the loop that follows.
ShenandoahScanRemembered* const scanner = old_gen->card_scan();
scanner->reset_object_range(region->bottom(), region->end());
scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());
HeapWord* obj_addr = region->bottom();
while (obj_addr < tams) {
oop obj = cast_to_oop(obj_addr);
if (marking_context->is_marked(obj)) {
assert(obj->klass() != nullptr, "klass should not be null");
// This thread is responsible for registering all objects in this region. No need for lock.
scanner->register_object_without_lock(obj_addr);
obj_addr += obj->size();
} else {
HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
size_t fill_size = next_marked_obj - obj_addr;
assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
ShenandoahHeap::fill_with_object(obj_addr, fill_size);
scanner->register_object_without_lock(obj_addr);
obj_addr = next_marked_obj;
}
}
// We do not need to scan above TAMS because restored top equals tams
assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
{
ShenandoahHeapLocker locker(_heap->lock());
HeapWord* update_watermark = region->get_update_watermark();
// pip_unpadded is memory too small to be filled above original top
size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize;
assert((region->top() == region->end())
|| (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant");
assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity");
size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize;
assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero");
// Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
// is_collector_free range. We'll add it to that range below.
region->restore_top_before_promote();
#ifdef ASSERT
size_t region_to_be_used_in_old = region->used();
assert(region_to_be_used_in_old + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant");
#endif
// The update_watermark was likely established while we had the artificially high value of top. Make it sane now.
assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
region->set_update_watermark(region->top());
// Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size()
_heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
region->set_affiliation(OLD_GENERATION);
region->set_promoted_in_place();
}
}
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
ShenandoahMarkingContext* marking_context = _heap->marking_context();
oop obj = cast_to_oop(region->bottom());
assert(_generation->is_mark_complete(), "sanity");
assert(region->is_young(), "Only young regions can be promoted");
assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
const size_t used_bytes = obj->size() * HeapWordSize;
const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
const size_t humongous_waste = spanned_regions * region_size_bytes - obj->size() * HeapWordSize;
const size_t index_limit = region->index() + spanned_regions;
ShenandoahOldGeneration* const old_gen = _heap->old_generation();
ShenandoahGeneration* const young_gen = _heap->young_generation();
{
// We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
// young to old.
ShenandoahHeapLocker locker(_heap->lock());
// We promote humongous objects unconditionally, without checking for availability. We adjust
// usage totals, including humongous waste, after evacuation is done.
log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions);
// For this region and each humongous continuation region spanned by this humongous object, change
// affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
// in the last humongous region that is not spanned by obj is currently not used.
for (size_t i = region->index(); i < index_limit; i++) {
ShenandoahHeapRegion* r = _heap->get_region(i);
log_debug(gc)("promoting humongous region %zu, from " PTR_FORMAT " to " PTR_FORMAT,
r->index(), p2i(r->bottom()), p2i(r->top()));
// We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
r->set_affiliation(OLD_GENERATION);
r->set_promoted_in_place();
}
ShenandoahFreeSet* freeset = _heap->free_set();
freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste);
}
// Since this region may have served previously as OLD, it may hold obsolete object range info.
HeapWord* const humongous_bottom = region->bottom();
ShenandoahScanRemembered* const scanner = old_gen->card_scan();
scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
// Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
scanner->register_object_without_lock(humongous_bottom);
if (obj->is_typeArray()) {
// Primitive arrays don't need to be scanned.
log_debug(gc)("Clean cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
scanner->mark_range_as_clean(humongous_bottom, obj->size());
} else {
log_debug(gc)("Dirty cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
scanner->mark_range_as_dirty(humongous_bottom, obj->size());
}
}

View File

@ -27,6 +27,7 @@
#include "gc/shared/workerThread.hpp"
class ShenandoahGeneration;
class ShenandoahGenerationalHeap;
class ShenandoahHeapRegion;
class ShenandoahRegionIterator;

View File

@ -86,6 +86,7 @@
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -201,9 +202,9 @@ jint ShenandoahHeap::initialize() {
assert(num_min_regions <= _num_regions, "sanity");
_minimum_size = num_min_regions * reg_size_bytes;
_soft_max_size = clamp(SoftMaxHeapSize, min_capacity(), max_capacity());
_soft_max_size.store_relaxed(clamp(SoftMaxHeapSize, min_capacity(), max_capacity()));
_committed = _initial_size;
_committed.store_relaxed(_initial_size);
size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
@ -725,17 +726,17 @@ size_t ShenandoahHeap::used() const {
}
size_t ShenandoahHeap::committed() const {
return AtomicAccess::load(&_committed);
return _committed.load_relaxed();
}
void ShenandoahHeap::increase_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
_committed += bytes;
_committed.fetch_then_add(bytes, memory_order_relaxed);
}
void ShenandoahHeap::decrease_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
_committed -= bytes;
_committed.fetch_then_sub(bytes, memory_order_relaxed);
}
size_t ShenandoahHeap::capacity() const {
@ -747,7 +748,7 @@ size_t ShenandoahHeap::max_capacity() const {
}
size_t ShenandoahHeap::soft_max_capacity() const {
size_t v = AtomicAccess::load(&_soft_max_size);
size_t v = _soft_max_size.load_relaxed();
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
@ -758,7 +759,7 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) {
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
AtomicAccess::store(&_soft_max_size, v);
_soft_max_size.store_relaxed(v);
}
size_t ShenandoahHeap::min_capacity() const {
@ -1775,12 +1776,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
if (!_aux_bitmap_region_special) {
bool success = os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
if (!success) {
log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
assert(false, "Auxiliary marking bitmap uncommit should always succeed");
}
os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
}
}
@ -1946,7 +1942,7 @@ private:
size_t const _stride;
shenandoah_padding(0);
volatile size_t _index;
Atomic<size_t> _index;
shenandoah_padding(1);
public:
@ -1959,8 +1955,8 @@ public:
size_t stride = _stride;
size_t max = _heap->num_regions();
while (AtomicAccess::load(&_index) < max) {
size_t cur = AtomicAccess::fetch_then_add(&_index, stride, memory_order_relaxed);
while (_index.load_relaxed() < max) {
size_t cur = _index.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@ -2626,11 +2622,7 @@ void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
size_t len = _bitmap_bytes_per_slice;
char* addr = (char*) _bitmap_region.start() + off;
bool success = os::uncommit_memory(addr, len);
if (!success) {
log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), len);
assert(false, "Bitmap slice uncommit should always succeed");
}
os::uncommit_memory(addr, len);
}
void ShenandoahHeap::forbid_uncommit() {
@ -2712,11 +2704,11 @@ ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
_index(0) {}
void ShenandoahRegionIterator::reset() {
_index = 0;
_index.store_relaxed(0);
}
bool ShenandoahRegionIterator::has_next() const {
return _index < _heap->num_regions();
return _index.load_relaxed() < _heap->num_regions();
}
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {

View File

@ -88,7 +88,7 @@ private:
ShenandoahHeap* _heap;
shenandoah_padding(0);
volatile size_t _index;
Atomic<size_t> _index;
shenandoah_padding(1);
// No implicit copying: iterators should be passed by reference to capture the state
@ -208,9 +208,9 @@ private:
size_t _initial_size;
size_t _minimum_size;
volatile size_t _soft_max_size;
Atomic<size_t> _soft_max_size;
shenandoah_padding(0);
volatile size_t _committed;
Atomic<size_t> _committed;
shenandoah_padding(1);
public:
@ -340,7 +340,7 @@ private:
ShenandoahSharedFlag _full_gc_move_in_progress;
ShenandoahSharedFlag _concurrent_strong_root_in_progress;
size_t _gc_no_progress_count;
Atomic<size_t> _gc_no_progress_count;
// This updates the singular, global gc state. This call must happen on a safepoint.
void set_gc_state_at_safepoint(uint mask, bool value);

View File

@ -49,7 +49,7 @@
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/prefetch.inline.hpp"
@ -61,7 +61,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {
}
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed);
size_t new_index = _index.add_then_fetch((size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns null on OOB.
return _heap->get_region(new_index - 1);
}
@ -75,15 +75,15 @@ inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
}
inline void ShenandoahHeap::notify_gc_progress() {
AtomicAccess::store(&_gc_no_progress_count, (size_t) 0);
_gc_no_progress_count.store_relaxed((size_t) 0);
}
inline void ShenandoahHeap::notify_gc_no_progress() {
AtomicAccess::inc(&_gc_no_progress_count);
_gc_no_progress_count.add_then_fetch((size_t) 1);
}
inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
return AtomicAccess::load(&_gc_no_progress_count);
return _gc_no_progress_count.load_relaxed();
}
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {

View File

@ -816,11 +816,7 @@ void ShenandoahHeapRegion::do_commit() {
void ShenandoahHeapRegion::do_uncommit() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->is_heap_region_special()) {
bool success = os::uncommit_memory((char *) bottom(), RegionSizeBytes);
if (!success) {
log_warning(gc)("Region uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(bottom()), RegionSizeBytes);
assert(false, "Region uncommit should always succeed");
}
os::uncommit_memory((char *) bottom(), RegionSizeBytes);
}
if (!heap->is_bitmap_region_special()) {
heap->uncommit_bitmap_slice(this);

View File

@ -0,0 +1,311 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc/shared/plab.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
// Tracks the regions selected for promote-in-place within one free-set
// partition: the closed index interval covering them, how many there are, and
// the total remnant (pad) bytes they contribute.
// The interval starts out empty-by-construction: _low_idx is one past the
// highest valid region index and _high_idx is below the lowest, so the first
// increment() establishes both bounds.
ShenandoahInPlacePromotionPlanner::RegionPromotions::RegionPromotions(ShenandoahFreeSet* free_set)
  : _low_idx(free_set->max_regions())
  , _high_idx(-1)
  , _regions(0)
  , _bytes(0)
  , _free_set(free_set)
{
}
// Record that the region at region_index was selected for promote-in-place:
// widen the tracked [low, high] index interval to cover it and accumulate the
// region count and remnant (pad) byte tally.
void ShenandoahInPlacePromotionPlanner::RegionPromotions::increment(idx_t region_index, size_t remnant_bytes) {
  // Grow the interval so it still covers every recorded region.
  _low_idx  = (region_index < _low_idx)  ? region_index : _low_idx;
  _high_idx = (region_index > _high_idx) ? region_index : _high_idx;
  // One more region, plus its remnant bytes.
  _regions += 1;
  _bytes   += remnant_bytes;
}
// Retire the recorded regions from the given free-set partition by shrinking
// the partition's interval. No-op when nothing was recorded for this partition.
void ShenandoahInPlacePromotionPlanner::RegionPromotions::update_free_set(ShenandoahFreeSetPartitionId partition_id) const {
  if (_regions == 0) {
    return;
  }
  _free_set->shrink_interval_if_range_modifies_either_boundary(partition_id, _low_idx, _high_idx, _regions);
}
// Plans which young regions will be promoted in place. Two thresholds are
// precomputed as fractions of the region size:
//  - _old_garbage_threshold: a region with at least this much garbage is a
//    better fit for promotion by copying (value comes from the old
//    generation's heuristics, expressed as a percent);
//  - _pip_used_threshold: minimum used bytes (ShenandoahGenerationalMinPIPUsage
//    percent of a region) for in-place promotion to be worthwhile.
// NOTE(review): _free_set and _marking_context are initialized from _heap, so
// this relies on _heap being declared before them in the class — confirm the
// declaration order matches the initializer list.
ShenandoahInPlacePromotionPlanner::ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap)
  : _old_garbage_threshold(ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold() / 100)
  , _pip_used_threshold(ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage / 100)
  , _heap(heap)
  , _free_set(_heap->free_set())
  , _marking_context(_heap->marking_context())
  , _mutator_regions(_free_set)
  , _collector_regions(_free_set)
  , _pip_padding_bytes(0)
{
}
// A region qualifies for promote-in-place only when it is densely populated:
// too little garbage for copy-evacuation to pay off, and enough used memory to
// justify promoting the whole region where it stands.
bool ShenandoahInPlacePromotionPlanner::is_eligible(const ShenandoahHeapRegion* region) const {
  if (region->garbage() >= _old_garbage_threshold) {
    // Garbage-rich regions are better reclaimed by evacuation.
    return false;
  }
  return region->used() > _pip_used_threshold;
}
// Evaluate one candidate region and, if it still qualifies, flag it for
// promote-in-place during the upcoming evacuation:
//  - reject the region if concurrent mark is still running or if it received
//    allocations since mark start (top != TAMS);
//  - otherwise save its current top (this saved value doubles as the
//    promote-in-place flag) and fill the unused remnant at the end of the
//    region with a dummy object so no new allocations can land there;
//  - record the region against the free-set partition it belongs to so that
//    update_free_set() can later retire it and account for the padding.
void ShenandoahInPlacePromotionPlanner::prepare(ShenandoahHeapRegion* r) {
  HeapWord* tams = _marking_context->top_at_mark_start(r);
  HeapWord* original_top = r->top();
  if (_heap->is_concurrent_mark_in_progress() || tams != original_top) {
    // We do not promote this region (either in place or by copy) because it has received new allocations.
    // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
    // used > pip_used_threshold, and get_top_before_promote() != tams.
    // TODO: Such a region should have had its age reset to zero when it was used for allocation?
    return;
  }
  // No allocations from this region have been made during concurrent mark. It meets all the criteria
  // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
  // we use this field to indicate that this region should be promoted in place during the evacuation
  // phase.
  r->save_top_before_promote();
  size_t remnant_bytes = r->free();
  size_t remnant_words = remnant_bytes / HeapWordSize;
  // The fill below assumes any remnant of at least min_fill_size() words can hold a filler object.
  assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
  if (remnant_words >= ShenandoahHeap::min_fill_size()) {
    ShenandoahHeap::fill_with_object(original_top, remnant_words);
    // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
    // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
    // new allocations would not necessarily be eligible for promotion. This addresses both issues.
    r->set_top(r->end());
    // The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
    // Otherwise, the region is in the NotFree partition.
    const idx_t i = r->index();
    ShenandoahFreeSetPartitionId p = _free_set->membership(i);
    if (p == ShenandoahFreeSetPartitionId::Mutator) {
      _mutator_regions.increment(i, remnant_bytes);
    } else if (p == ShenandoahFreeSetPartitionId::Collector) {
      _collector_regions.increment(i, remnant_bytes);
    } else {
      assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < _heap->plab_min_size()),
             "Should be NotFree if not in Collector or Mutator partitions");
      // In this case, the memory is already counted as used and the region has already been retired. There is
      // no need for further adjustments to used. Further, the remnant memory for this region will not be
      // unallocated or made available to OldCollector after pip.
      remnant_bytes = 0;
    }
    // remnant_bytes is zero for NotFree regions, so padding is only tallied for
    // memory that was still counted as free.
    _pip_padding_bytes += remnant_bytes;
    _free_set->prepare_to_promote_in_place(i, remnant_bytes);
  } else {
    // Since the remnant is so small that this region has already been retired, we don't have to worry about any
    // accidental allocations occurring within this region before the region is promoted in place.
    // This region was already not in the Collector or Mutator set, so no need to remove it.
    assert(_free_set->membership(r->index()) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
  }
}
// Publish the results of planning: record the total padding that the
// promote-in-place fills will consume, adjust free-set accounting for the
// selected regions, and retire those regions from their partitions.
void ShenandoahInPlacePromotionPlanner::update_free_set() const {
  // Tell the old generation how much memory is lost to promote-in-place padding.
  _heap->old_generation()->set_pad_for_promote_in_place(_pip_padding_bytes);

  // Only touch the free-set tallies when something was actually selected.
  if (_mutator_regions._regions + _collector_regions._regions > 0) {
    _free_set->account_for_pip_regions(_mutator_regions._regions, _mutator_regions._bytes,
                                       _collector_regions._regions, _collector_regions._bytes);
  }

  // Retire the selected regions from the Mutator and Collector partitions.
  _mutator_regions.update_free_set(ShenandoahFreeSetPartitionId::Mutator);
  _collector_regions.update_free_set(ShenandoahFreeSetPartitionId::Collector);
}
// Promote the given region in place if it qualifies: it must be an active
// young region whose age has reached the tenuring threshold. Humongous objects
// are promoted via their start region; regular regions only when they were
// flagged (top-before-promote set) during planning.
void ShenandoahInPlacePromoter::maybe_promote_region(ShenandoahHeapRegion* r) const {
  if (!r->is_young() || !r->is_active() || !_heap->is_tenurable(r)) {
    // Not a candidate: only sufficiently aged, active young regions are promoted.
    return;
  }
  if (r->is_humongous_start()) {
    // Humongous regions are promoted here, during evacuation, rather than at a
    // safepoint. They can never go into the collection set, because membership
    // there makes the load-reference barrier (LRB) copy on reference fetch.
    // Aged humongous continuation regions are handled together with their
    // start region. (An aged regular region with more garbage than
    // ShenandoahOldGarbageThreshold is instead promoted by evacuation — this
    // cycle if the collection set has room, otherwise a future one.)
    //
    // A humongous primitive array is deliberately left in young: keeping it
    // there carries no performance penalty, and when it becomes garbage,
    // reclaiming it from young is quicker and cheaper than from old.
    oop obj = cast_to_oop(r->bottom());
    if (!obj->is_typeArray()) {
      promote_humongous(r);
    }
  } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
    // Like humongous regions, promote-in-place regions must stay out of the
    // collection set so the LRB does not copy on reference fetch. A region
    // that received allocations during the current cycle has a null
    // top-before-promote and is skipped: its newly allocated objects do not
    // have appropriate age, and the region's age is reset at end of cycle.
    promote(r);
  }
}
// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content. The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
//
// Steps, in order:
//  1. (debug) verify the region still satisfies every promote-in-place precondition;
//  2. rebuild the remembered-set object registrations for the region, filling
//     dead gaps below TAMS with filler objects so the region stays parsable;
//  3. under the heap lock, restore the region's true top (undoing the pad fill
//     done at planning time) and transfer the region from young to old.
void ShenandoahInPlacePromoter::promote(ShenandoahHeapRegion* region) const {
  ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
  HeapWord* const tams = marking_context->top_at_mark_start(region);
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  {
    // Sanity block: all of these were established when the region was selected
    // for promote-in-place and must still hold at promotion time.
    const size_t old_garbage_threshold =
      (region_size_bytes * _heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold,
           "Region %zu has too much garbage for promotion", region->index());
    assert(region->is_young(), "Only young regions can be promoted");
    assert(region->is_regular(), "Use different service to promote humongous regions");
    assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
    assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
  }
  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  // Rebuild the remembered set information and mark the entire range as DIRTY. We do NOT scan the content of this
  // range to determine which cards need to be DIRTY. That would force us to scan the region twice, once now, and
  // once during the subsequent remembered set scan. Instead, we blindly (conservatively) mark everything as DIRTY
  // now and then sort out the CLEAN pages during the next remembered set scan.
  //
  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
  // then registering every live object and every coalesced range of free objects in the loop that follows.
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(region->bottom(), region->end());
  scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());
  // Walk the region below TAMS: register live objects as-is; replace each run
  // of dead objects with one filler object and register that instead.
  HeapWord* obj_addr = region->bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be null");
      // This thread is responsible for registering all objects in this region. No need for lock.
      scanner->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      scanner->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }
  // We do not need to scan above TAMS because restored top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
  {
    ShenandoahHeapLocker locker(_heap->lock());
#ifdef ASSERT
    // Values captured before restore_top_before_promote(), used only by the
    // asserts below; the whole computation compiles away in product builds.
    HeapWord* update_watermark = region->get_update_watermark();
    // pip_unpadded is memory too small to be filled above original top
    size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize;
    assert((region->top() == region->end())
           || (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant");
    assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity");
    size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize;
    assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero");
#endif
    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range. We'll add it to that range below.
    region->restore_top_before_promote();
    assert(region->used() + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant");
    // The update_watermark was likely established while we had the artificially high value of top. Make it sane now.
    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
    region->set_update_watermark(region->top());
    // Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size()
    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
    region->set_affiliation(OLD_GENERATION);
    region->set_promoted_in_place();
  }
}
// Promote a live humongous object in place: re-affiliate the humongous start
// region and every continuation region it spans from YOUNG to OLD without
// moving the object.
//
// Preconditions (asserted): the region is a young humongous start region that
// has aged enough to be tenurable, and the object it holds is marked live and
// is not a primitive (type) array.
//
// After the affiliation change (done under the heap lock), the old
// generation's remembered-set card scanner is updated: the stale object-range
// info spanning these regions is reset, the single humongous object is
// registered, and its full extent is marked dirty so subsequent remembered-set
// scans will examine it.
void ShenandoahInPlacePromoter::promote_humongous(ShenandoahHeapRegion* region) const {
  oop obj = cast_to_oop(region->bottom());
  assert(region->is_young(), "Only young regions can be promoted");
  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
  assert(_heap->marking_context()->is_marked(obj), "Promoted humongous object should be alive");
  assert(!obj->is_typeArray(), "Don't promote humongous primitives");

  const size_t used_bytes = obj->size() * HeapWordSize;
  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  // Waste is the unused tail of the last spanned region. Reuse used_bytes
  // rather than recomputing obj->size() * HeapWordSize a second time.
  const size_t humongous_waste = spanned_regions * region_size_bytes - used_bytes;
  const size_t index_limit = region->index() + spanned_regions;
  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(_heap->lock());

    // We promote humongous objects unconditionally, without checking for availability. We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = region->index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      log_debug(gc)("promoting humongous region %zu, from " PTR_FORMAT " to " PTR_FORMAT,
                    r->index(), p2i(r->bottom()), p2i(r->top()));
      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
      r->set_affiliation(OLD_GENERATION);
      r->set_promoted_in_place();
    }
    ShenandoahFreeSet* freeset = _heap->free_set();
    freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  HeapWord* const humongous_bottom = region->bottom();
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  scanner->register_object_without_lock(humongous_bottom);

  log_debug(gc)("Dirty cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
                region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
  scanner->mark_range_as_dirty(humongous_bottom, obj->size());
}

View File

@ -0,0 +1,91 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
class ShenandoahFreeSet;
class ShenandoahMarkingContext;
class ShenandoahGenerationalHeap;
class ShenandoahHeapRegion;
// Plans promotion in place for the generational Shenandoah heap: selects young
// regions whose garbage is below and usage is above configurable thresholds
// (see is_eligible()), prepares them so no further allocations land in them,
// and finally reports the retired regions to the free set in bulk.
class ShenandoahInPlacePromotionPlanner {
using idx_t = ShenandoahSimpleBitMap::idx_t;
// Accumulator for regions chosen for promotion in place out of one free-set
// partition (Mutator or Collector). Tracks the index span, the region count,
// and a byte tally so the free set can be updated once per partition rather
// than per region.
struct RegionPromotions {
idx_t _low_idx;            // lowest region index accumulated so far
idx_t _high_idx;           // highest region index accumulated so far
size_t _regions;           // number of regions accumulated
size_t _bytes;             // NOTE(review): presumably the remnant bytes summed via increment(); confirm
ShenandoahFreeSet* _free_set;
explicit RegionPromotions(ShenandoahFreeSet* free_set);
// Record one more promoted region and its remnant (unusable tail) bytes.
void increment(idx_t region_index, size_t remnant_bytes);
// Push the accumulated retirements for the given partition into the free set.
void update_free_set(ShenandoahFreeSetPartitionId partition_id) const;
};
const size_t _old_garbage_threshold;   // eligibility bound on a region's garbage
const size_t _pip_used_threshold;      // eligibility bound on a region's usage
const ShenandoahGenerationalHeap* _heap;
ShenandoahFreeSet* _free_set;
const ShenandoahMarkingContext* _marking_context;
// Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
RegionPromotions _mutator_regions;
RegionPromotions _collector_regions;
// Tracks the padding of space above top in regions eligible for promotion in place
size_t _pip_padding_bytes;
public:
explicit ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap);
// Returns true if this region has garbage below and usage above the configurable thresholds
bool is_eligible(const ShenandoahHeapRegion* region) const;
// Prepares the region for promotion by moving top to the end to prevent allocations
void prepare(ShenandoahHeapRegion* region);
// Notifies the free set of in place promotions
void update_free_set() const;
size_t old_garbage_threshold() const { return _old_garbage_threshold; }
};
// Carries out promotion in place: re-affiliates a region — or, for a humongous
// object, the whole span of regions it occupies — from the young generation to
// the old generation without copying live objects.
class ShenandoahInPlacePromoter {
ShenandoahGenerationalHeap* _heap;   // generational heap this promoter operates on
public:
explicit ShenandoahInPlacePromoter(ShenandoahGenerationalHeap* heap) : _heap(heap) {}
// If the region still meets the criteria for promotion in place, it will be promoted
void maybe_promote_region(ShenandoahHeapRegion* region) const;
private:
// Promote a single regular (non-humongous) young region in place.
void promote(ShenandoahHeapRegion* region) const;
// Promote a humongous start region together with all of its continuation regions.
void promote_humongous(ShenandoahHeapRegion* region) const;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP

View File

@ -24,7 +24,6 @@
#include "gc/shenandoah/shenandoahLock.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
@ -46,8 +45,8 @@ void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
int ctr = os::is_MP() ? 0xFF : 0;
int yields = 0;
// Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
while (AtomicAccess::load(&_state) == locked ||
AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked) {
while (_state.load_relaxed() == locked ||
_state.compare_exchange(unlocked, locked) != unlocked) {
if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
// Lightly contended, spin a little if no safepoint is pending.
SpinPause();
@ -113,11 +112,11 @@ ShenandoahReentrantLock::~ShenandoahReentrantLock() {
void ShenandoahReentrantLock::lock() {
Thread* const thread = Thread::current();
Thread* const owner = AtomicAccess::load(&_owner);
Thread* const owner = _owner.load_relaxed();
if (owner != thread) {
ShenandoahSimpleLock::lock();
AtomicAccess::store(&_owner, thread);
_owner.store_relaxed(thread);
}
_count++;
@ -130,13 +129,13 @@ void ShenandoahReentrantLock::unlock() {
_count--;
if (_count == 0) {
AtomicAccess::store(&_owner, (Thread*)nullptr);
_owner.store_relaxed((Thread*)nullptr);
ShenandoahSimpleLock::unlock();
}
}
bool ShenandoahReentrantLock::owned_by_self() const {
Thread* const thread = Thread::current();
Thread* const owner = AtomicAccess::load(&_owner);
Thread* const owner = _owner.load_relaxed();
return owner == thread;
}

View File

@ -27,6 +27,7 @@
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
@ -35,9 +36,9 @@ private:
enum LockState { unlocked = 0, locked = 1 };
shenandoah_padding(0);
volatile LockState _state;
Atomic<LockState> _state;
shenandoah_padding(1);
Thread* volatile _owner;
Atomic<Thread*> _owner;
shenandoah_padding(2);
template<bool ALLOW_BLOCK>
@ -48,33 +49,33 @@ public:
ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
void lock(bool allow_block_for_safepoint) {
assert(AtomicAccess::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
assert(_owner.load_relaxed() != Thread::current(), "reentrant locking attempt, would deadlock");
if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
(AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked)) {
(_state.compare_exchange(unlocked, locked) != unlocked)) {
// 1. Java thread, and there is a pending safepoint. Dive into contended locking
// immediately without trying anything else, and block.
// 2. Fast lock fails, dive into contended lock handling.
contended_lock(allow_block_for_safepoint);
}
assert(AtomicAccess::load(&_state) == locked, "must be locked");
assert(AtomicAccess::load(&_owner) == nullptr, "must not be owned");
DEBUG_ONLY(AtomicAccess::store(&_owner, Thread::current());)
assert(_state.load_relaxed() == locked, "must be locked");
assert(_owner.load_relaxed() == nullptr, "must not be owned");
DEBUG_ONLY(_owner.store_relaxed(Thread::current());)
}
void unlock() {
assert(AtomicAccess::load(&_owner) == Thread::current(), "sanity");
DEBUG_ONLY(AtomicAccess::store(&_owner, (Thread*)nullptr);)
assert(_owner.load_relaxed() == Thread::current(), "sanity");
DEBUG_ONLY(_owner.store_relaxed((Thread*)nullptr);)
OrderAccess::fence();
AtomicAccess::store(&_state, unlocked);
_state.store_relaxed(unlocked);
}
void contended_lock(bool allow_block_for_safepoint);
bool owned_by_self() {
#ifdef ASSERT
return _state == locked && _owner == Thread::current();
return _state.load_relaxed() == locked && _owner.load_relaxed() == Thread::current();
#else
ShouldNotReachHere();
return false;
@ -111,7 +112,7 @@ public:
class ShenandoahReentrantLock : public ShenandoahSimpleLock {
private:
Thread* volatile _owner;
Atomic<Thread*> _owner;
uint64_t _count;
public:

View File

@ -433,8 +433,8 @@ void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
ShenandoahNMethod** const list = _list->list();
size_t max = (size_t)_limit;
while (_claimed < max) {
size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
while (_claimed.load_relaxed() < max) {
size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@ -457,8 +457,8 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
ShenandoahNMethod** list = _list->list();
size_t max = (size_t)_limit;
while (_claimed < max) {
size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
while (_claimed.load_relaxed() < max) {
size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;

View File

@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/growableArray.hpp"
// ShenandoahNMethod tuple records the internal locations of oop slots within reclocation stream in
@ -115,7 +116,7 @@ private:
int _limit;
shenandoah_padding(0);
volatile size_t _claimed;
Atomic<size_t> _claimed;
shenandoah_padding(1);
public:

View File

@ -27,7 +27,7 @@
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
typedef int32_t ShenandoahSharedValue;
typedef struct ShenandoahSharedFlag {
@ -37,7 +37,7 @@ typedef struct ShenandoahSharedFlag {
};
shenandoah_padding(0);
volatile ShenandoahSharedValue value;
Atomic<ShenandoahSharedValue> value;
shenandoah_padding(1);
ShenandoahSharedFlag() {
@ -45,19 +45,19 @@ typedef struct ShenandoahSharedFlag {
}
void set() {
AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
value.release_store_fence((ShenandoahSharedValue)SET);
}
void unset() {
AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
value.release_store_fence((ShenandoahSharedValue)UNSET);
}
bool is_set() const {
return AtomicAccess::load_acquire(&value) == SET;
return value.load_acquire() == SET;
}
bool is_unset() const {
return AtomicAccess::load_acquire(&value) == UNSET;
return value.load_acquire() == UNSET;
}
void set_cond(bool val) {
@ -72,7 +72,7 @@ typedef struct ShenandoahSharedFlag {
if (is_set()) {
return false;
}
ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
ShenandoahSharedValue old = value.compare_exchange((ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
return old == UNSET; // success
}
@ -80,17 +80,13 @@ typedef struct ShenandoahSharedFlag {
if (!is_set()) {
return false;
}
ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
ShenandoahSharedValue old = value.compare_exchange((ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
return old == SET; // success
}
volatile ShenandoahSharedValue* addr_of() {
return &value;
}
private:
volatile ShenandoahSharedValue* operator&() {
fatal("Use addr_of() instead");
fatal("Not supported");
return nullptr;
}
@ -105,7 +101,7 @@ private:
typedef struct ShenandoahSharedBitmap {
shenandoah_padding(0);
volatile ShenandoahSharedValue value;
Atomic<ShenandoahSharedValue> value;
shenandoah_padding(1);
ShenandoahSharedBitmap() {
@ -116,7 +112,7 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
ShenandoahSharedValue ov = value.load_acquire();
// We require all bits of mask_val to be set
if ((ov & mask_val) == mask_val) {
// already set
@ -124,7 +120,7 @@ typedef struct ShenandoahSharedBitmap {
}
ShenandoahSharedValue nv = ov | mask_val;
if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
if (value.compare_exchange(ov, nv) == ov) {
// successfully set: if value returned from cmpxchg equals ov, then nv has overwritten value.
return;
}
@ -135,14 +131,14 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
ShenandoahSharedValue ov = value.load_acquire();
if ((ov & mask_val) == 0) {
// already unset
return;
}
ShenandoahSharedValue nv = ov & ~mask_val;
if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
if (value.compare_exchange(ov, nv) == ov) {
// successfully unset
return;
}
@ -150,7 +146,7 @@ typedef struct ShenandoahSharedBitmap {
}
void clear() {
AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
value.release_store_fence((ShenandoahSharedValue)0);
}
// Returns true iff any bit set in mask is set in this.value.
@ -161,18 +157,18 @@ typedef struct ShenandoahSharedBitmap {
// Returns true iff all bits set in mask are set in this.value.
bool is_set_exactly(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
uint uvalue = AtomicAccess::load_acquire(&value);
uint uvalue = value.load_acquire();
return (uvalue & mask) == mask;
}
// Returns true iff all bits set in mask are unset in this.value.
bool is_unset(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
return (AtomicAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
return (value.load_acquire() & (ShenandoahSharedValue) mask) == 0;
}
bool is_clear() const {
return (AtomicAccess::load_acquire(&value)) == 0;
return (value.load_acquire()) == 0;
}
void set_cond(uint mask, bool val) {
@ -183,17 +179,13 @@ typedef struct ShenandoahSharedBitmap {
}
}
volatile ShenandoahSharedValue* addr_of() {
return &value;
}
ShenandoahSharedValue raw_value() const {
return value;
return value.load_relaxed();
}
private:
volatile ShenandoahSharedValue* operator&() {
fatal("Use addr_of() instead");
fatal("Not supported");
return nullptr;
}
@ -210,42 +202,36 @@ template<class T>
struct ShenandoahSharedEnumFlag {
typedef uint32_t EnumValueType;
shenandoah_padding(0);
volatile EnumValueType value;
Atomic<EnumValueType> value;
shenandoah_padding(1);
ShenandoahSharedEnumFlag() {
value = 0;
}
ShenandoahSharedEnumFlag() : value(0) {}
void set(T v) {
assert (v >= 0, "sanity");
assert (v < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
AtomicAccess::release_store_fence(&value, (EnumValueType)v);
value.release_store_fence((EnumValueType)v);
}
T get() const {
return (T)AtomicAccess::load_acquire(&value);
return (T)value.load_acquire();
}
T cmpxchg(T new_value, T expected) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
return (T)AtomicAccess::cmpxchg(&value, (EnumValueType)expected, (EnumValueType)new_value);
return (T)value.compare_exchange((EnumValueType)expected, (EnumValueType)new_value);
}
T xchg(T new_value) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
return (T)AtomicAccess::xchg(&value, (EnumValueType)new_value);
}
volatile EnumValueType* addr_of() {
return &value;
return (T)value.exchange((EnumValueType)new_value);
}
private:
volatile T* operator&() {
fatal("Use addr_of() instead");
fatal("Not supported");
return nullptr;
}
@ -260,7 +246,7 @@ private:
typedef struct ShenandoahSharedSemaphore {
shenandoah_padding(0);
volatile ShenandoahSharedValue value;
Atomic<ShenandoahSharedValue> value;
shenandoah_padding(1);
static uint max_tokens() {
@ -269,17 +255,17 @@ typedef struct ShenandoahSharedSemaphore {
ShenandoahSharedSemaphore(uint tokens) {
assert(tokens <= max_tokens(), "sanity");
AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)tokens);
value.release_store_fence((ShenandoahSharedValue)tokens);
}
bool try_acquire() {
while (true) {
ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
ShenandoahSharedValue ov = value.load_acquire();
if (ov == 0) {
return false;
}
ShenandoahSharedValue nv = ov - 1;
if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
if (value.compare_exchange(ov, nv) == ov) {
// successfully set
return true;
}
@ -287,7 +273,7 @@ typedef struct ShenandoahSharedSemaphore {
}
void claim_all() {
AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
value.release_store_fence((ShenandoahSharedValue)0);
}
} ShenandoahSharedSemaphore;

View File

@ -36,7 +36,7 @@
nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \
nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \
nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*) \
volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
volatile_nonstatic_field(ShenandoahHeap, _committed, Atomic<size_t>) \
static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \
nonstatic_field(ShenandoahHeapRegion, _state, Atomic<ShenandoahHeapRegion::RegionState>) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
#define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
#define CDS_PREIMAGE_ARCHIVE_MAGIC 0xcafea07c
#define CDS_GENERIC_HEADER_SUPPORTED_MIN_VERSION 13
#define CURRENT_CDS_ARCHIVE_VERSION 19
#define CURRENT_CDS_ARCHIVE_VERSION 20
typedef struct CDSFileMapRegion {
int _crc; // CRC checksum of this region.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@ class InterpreterCodelet: public Stub {
// General info/converters
int size() const { return _size; }
static int alignment() { return HeapWordSize; }
static int code_alignment() { return CodeEntryAlignment; }
static uint code_alignment() { return CodeEntryAlignment; }
// Code info
address code_begin() const { return align_up((address)this + sizeof(InterpreterCodelet), code_alignment()); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -355,7 +355,7 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) {
return vmIntrinsics;
}
#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_int_flag, do_size_t_flag, do_intx_flag, do_uintx_flag) \
#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_uint_flag, do_int_flag, do_size_t_flag, do_intx_flag, do_uintx_flag) \
do_int_flag(AllocateInstancePrefetchLines) \
do_int_flag(AllocatePrefetchDistance) \
do_intx_flag(AllocatePrefetchInstr) \
@ -367,7 +367,7 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) {
do_bool_flag(CITime) \
do_bool_flag(CITimeEach) \
do_size_t_flag(CodeCacheSegmentSize) \
do_intx_flag(CodeEntryAlignment) \
do_uint_flag(CodeEntryAlignment) \
do_int_flag(ContendedPaddingWidth) \
do_bool_flag(DontCompileHugeMethods) \
do_bool_flag(EagerJVMCI) \
@ -554,16 +554,17 @@ jobjectArray readConfiguration0(JNIEnv *env, JVMCI_TRAPS) {
JVMCIENV->put_object_at(vmFlags, i++, vmFlagObj); \
}
#define ADD_BOOL_FLAG(name) ADD_FLAG(bool, name, BOXED_BOOLEAN)
#define ADD_UINT_FLAG(name) ADD_FLAG(uint, name, BOXED_LONG)
#define ADD_INT_FLAG(name) ADD_FLAG(int, name, BOXED_LONG)
#define ADD_SIZE_T_FLAG(name) ADD_FLAG(size_t, name, BOXED_LONG)
#define ADD_INTX_FLAG(name) ADD_FLAG(intx, name, BOXED_LONG)
#define ADD_UINTX_FLAG(name) ADD_FLAG(uintx, name, BOXED_LONG)
len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
JVMCIObjectArray vmFlags = JVMCIENV->new_VMFlag_array(len, JVMCI_CHECK_NULL);
int i = 0;
JVMCIObject value;
PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_INT_FLAG, ADD_SIZE_T_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_UINT_FLAG, ADD_INT_FLAG, ADD_SIZE_T_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
JVMCIObjectArray vmIntrinsics = CompilerToVM::initialize_intrinsics(JVMCI_CHECK_NULL);

View File

@ -87,8 +87,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
template <class E>
void MmapArrayAllocator<E>::free(E* addr, size_t length) {
bool result = os::release_memory((char*)addr, size_for(length));
assert(result, "Failed to release memory");
os::release_memory((char*)addr, size_for(length));
}
template <class E>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -330,6 +330,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
size_t len = MAX2(ARENA_ALIGN(x), (size_t) Chunk::size);
if (MemTracker::check_exceeds_limit(x, _mem_tag)) {
if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(x, OOM_MALLOC_ERROR, "MallocLimit in Arena::grow");
}
return nullptr;
}

View File

@ -99,9 +99,7 @@ static char* reserve_memory_inner(char* requested_address,
}
// Base not aligned, retry.
if (!os::release_memory(base, size)) {
fatal("os::release_memory failed");
}
os::release_memory(base, size);
// Map using the requested alignment.
return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
@ -231,13 +229,13 @@ ReservedSpace MemoryReserver::reserve(size_t size,
mem_tag);
}
bool MemoryReserver::release(const ReservedSpace& reserved) {
void MemoryReserver::release(const ReservedSpace& reserved) {
assert(reserved.is_reserved(), "Precondition");
if (reserved.special()) {
return os::release_memory_special(reserved.base(), reserved.size());
os::release_memory_special(reserved.base(), reserved.size());
} else {
return os::release_memory(reserved.base(), reserved.size());
os::release_memory(reserved.base(), reserved.size());
}
}
@ -266,9 +264,7 @@ static char* map_memory_to_file(char* requested_address,
// Base not aligned, retry.
if (!os::unmap_memory(base, size)) {
fatal("os::unmap_memory failed");
}
os::unmap_memory(base, size);
// Map using the requested alignment.
return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);

View File

@ -70,7 +70,7 @@ public:
MemTag mem_tag);
// Release reserved memory
static bool release(const ReservedSpace& reserved);
static void release(const ReservedSpace& reserved);
};
class CodeMemoryReserver : AllStatic {

View File

@ -190,10 +190,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
}
// Uncommit...
if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
// Note: this can actually happen, since uncommit may increase the number of mappings.
fatal("Failed to uncommit metaspace.");
}
os::uncommit_memory((char*)p, word_size * BytesPerWord);
ASAN_POISON_MEMORY_REGION((char*)p, word_size * BytesPerWord);

View File

@ -370,34 +370,22 @@ void VirtualSpace::shrink_by(size_t size) {
assert(middle_high_boundary() <= aligned_upper_new_high &&
aligned_upper_new_high + upper_needs <= upper_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_upper_high -= upper_needs;
}
os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable);
_upper_high -= upper_needs;
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= aligned_middle_new_high &&
aligned_middle_new_high + middle_needs <= middle_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_middle_high -= middle_needs;
}
os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable);
_middle_high -= middle_needs;
}
if (lower_needs > 0) {
assert(low_boundary() <= aligned_lower_new_high &&
aligned_lower_new_high + lower_needs <= lower_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_lower_high -= lower_needs;
}
os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable);
_lower_high -= lower_needs;
}
_high -= size;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,6 +22,7 @@
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "cds/cdsConfig.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciMetadata.hpp"
@ -512,8 +513,7 @@ void TrainingData::dump_training_data() {
#endif // ASSERT
td = ArchiveBuilder::current()->get_buffered_addr(td);
uint hash = TrainingData::Key::cds_hash(td->key());
u4 delta = ArchiveBuilder::current()->buffer_to_offset_u4((address)td);
writer.add(hash, delta);
writer.add(hash, AOTCompressedPointers::encode_not_null(td));
}
writer.dump(&_archived_training_data_dictionary_for_dumping, "training data dictionary");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -694,7 +694,9 @@
"Print progress during Iterative Global Value Numbering") \
\
develop(uint, VerifyIterativeGVN, 0, \
"Verify Iterative Global Value Numbering =EDCBA, with:" \
"Verify Iterative Global Value Numbering =FEDCBA, with:" \
" F: verify Node::Ideal does not return nullptr if the node" \
"hash has changed" \
" E: verify node specific invariants" \
" D: verify Node::Identity did not miss opportunities" \
" C: verify Node::Ideal did not miss opportunities" \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2243,7 +2243,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
if (wait_for_cast_input_igvn(igvn)) {
igvn->_worklist.push(this);
return nullptr;
return progress;
}
uncasted = true;
uin = unique_input(phase, true);
@ -2320,6 +2320,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
for (uint i = 1; i < req(); i++) {
set_req_X(i, cast, igvn);
progress = this;
}
uin = cast;
}
@ -2338,7 +2339,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#endif
// Identity may not return the expected uin, if it has to wait for the region, in irreducible case
assert(ident == uin || ident->is_top() || must_wait_for_region_in_irreducible_loop(phase), "Identity must clean this up");
return nullptr;
return progress;
}
Node* opt = nullptr;
@ -2529,7 +2530,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Phi references itself through all other inputs then splitting the
// Phi through memory merges would create dead loop at later stage.
if (ii == top) {
return nullptr; // Delay optimization until graph is cleaned.
return progress; // Delay optimization until graph is cleaned.
}
if (ii->is_MergeMem()) {
MergeMemNode* n = ii->as_MergeMem();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -158,7 +158,7 @@ void ConstantTable::calculate_offsets_and_size() {
// Align size up to the next section start (which is insts; see
// CodeBuffer::align_at_start).
assert(_size == -1, "already set?");
_size = align_up(offset, (int)CodeEntryAlignment);
_size = align_up(offset, CodeEntryAlignment);
}
bool ConstantTable::emit(C2_MacroAssembler* masm) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,8 +46,8 @@ Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (in(0) && in(0)->is_top()) return nullptr;
if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
// If transformed to a MergeMem, get the desired slice
Node* mem = in(MemNode::Memory);
// If mem input is a MergeMem, get the desired slice
uint alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {

Some files were not shown because too many files have changed in this diff Show More