diff --git a/.gitignore b/.gitignore
index 0743489f8ec..b6b4a1a559a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,7 @@ NashornProfile.txt
**/JTreport/**
**/JTwork/**
/src/utils/LogCompilation/target/
+/src/utils/LogCompilation/logc.jar
/.project/
/.settings/
/compile_commands.json
diff --git a/bin/update_copyright_year.sh b/bin/update_copyright_year.sh
index fa7989d234b..fcdac6b935f 100644
--- a/bin/update_copyright_year.sh
+++ b/bin/update_copyright_year.sh
@@ -1,7 +1,7 @@
#!/bin/bash -f
#
-# Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,13 @@
# questions.
#
-# Script to update the Copyright YEAR range in Mercurial & Git sources.
+# Script to update the Copyright YEAR range in Git sources.
# (Originally from xdono, Thanks!)
+# To update Copyright years for changes in a specific branch,
+# you use a command along these lines:
+# $ git diff upstream/master... | lsdiff | cut -d '/' -f 2- | bash bin/update_copyright_year.sh -m -
+
#------------------------------------------------------------
copyright="Copyright"
copyright_symbol="(c)"
@@ -47,7 +51,7 @@ rm -f -r ${tmp}
mkdir -p ${tmp}
total=0
-usage="Usage: `basename "$0"` [-c company] [-y year] [-h|f]"
+usage="Usage: `basename "$0"` [-c company] [-y year] [-m file] [-h|f]"
Help()
{
# Display Help
@@ -65,15 +69,18 @@ Help()
echo "-b Specifies the base reference for change set lookup."
echo "-f Updates the copyright for all change sets in a given year,"
echo " as specified by -y. Overrides -b flag."
+ echo "-m Read the list of modified files from the given file,"
+ echo " use - to read from stdin"
echo "-h Print this help."
echo
}
full_year=false
base_reference=master
+modified_files_origin="";
# Process options
-while getopts "b:c:fhy:" option; do
+while getopts "b:c:fhm:y:" option; do
case $option in
b) # supplied base reference
base_reference=${OPTARG}
@@ -91,6 +98,9 @@ while getopts "b:c:fhy:" option; do
y) # supplied company year
year=${OPTARG}
;;
+ m) # modified files will be read from the given origin
+ modified_files_origin="${OPTARG}"
+ ;;
\?) # illegal option
echo "$usage"
exit 1
@@ -110,18 +120,10 @@ git status &> /dev/null && git_found=true
if [ "$git_found" != "true" ]; then
echo "Error: Please execute script from within a JDK git repository."
exit 1
-else
- echo "Using Git version control system"
- vcs_status=(git ls-files -m)
- if [ "$full_year" = "true" ]; then
- vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
- else
- vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
- fi
- vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
- vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
fi
+echo "Using Git version control system"
+
# Return true if it makes sense to edit this file
saneFileToCheck()
{
@@ -168,6 +170,25 @@ updateFile() # file
echo "${changed}"
}
+# Update the copyright year on files sent in stdin
+updateFiles() # stdin: list of files to update
+{
+ count=0
+ fcount=0
+ while read i; do
+ fcount=`expr ${fcount} '+' 1`
+ if [ `updateFile "${i}"` = "true" ] ; then
+ count=`expr ${count} '+' 1`
+ fi
+ done
+ if [ ${count} -gt 0 ] ; then
+ printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
+ total=`expr ${total} '+' ${count}`
+ else
+ printf " None of the %d files were changed.\n" ${fcount}
+ fi
+}
+
# Update the copyright year on all files changed by this changeset
updateChangesetFiles() # changeset
{
@@ -178,18 +199,7 @@ updateChangesetFiles() # changeset
| ${awk} -F' ' '{for(i=1;i<=NF;i++)print $i}' \
> ${files}
if [ -f "${files}" -a -s "${files}" ] ; then
- fcount=`cat ${files}| wc -l`
- for i in `cat ${files}` ; do
- if [ `updateFile "${i}"` = "true" ] ; then
- count=`expr ${count} '+' 1`
- fi
- done
- if [ ${count} -gt 0 ] ; then
- printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
- total=`expr ${total} '+' ${count}`
- else
- printf " None of the %d files were changed.\n" ${fcount}
- fi
+ cat ${files} | updateFiles
else
printf " ERROR: No files changed in the changeset? Must be a mistake.\n"
set -x
@@ -204,67 +214,80 @@ updateChangesetFiles() # changeset
}
# Check if repository is clean
+vcs_status=(git ls-files -m)
previous=`"${vcs_status[@]}"|wc -l`
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contains previously edited working set files."
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
-# Get all changesets this year
-all_changesets=${tmp}/all_changesets
-rm -f ${all_changesets}
-"${vcs_list_changesets[@]}" > ${all_changesets}
-
-# Check changeset to see if it is Copyright only changes, filter changesets
-if [ -s ${all_changesets} ] ; then
- echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
- index=0
- cat ${all_changesets} | while read changeset ; do
- index=`expr ${index} '+' 1`
- desc=${tmp}/desc.${changeset}
- rm -f ${desc}
- echo "------------------------------------------------"
- "${vcs_changeset_message[@]}" "${changeset}" > ${desc}
- printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
- if [ "${year}" = "2010" ] ; then
- if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
- printf " EXCLUDED tag changeset.\n"
- elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
- printf " EXCLUDED rebrand changeset.\n"
- elif cat ${desc} | grep -i -F copyright > /dev/null ; then
- printf " EXCLUDED copyright changeset.\n"
- else
- updateChangesetFiles ${changeset}
- fi
- else
- if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
- printf " EXCLUDED tag changeset.\n"
- elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
- printf " EXCLUDED copyright year changeset.\n"
- else
- updateChangesetFiles ${changeset}
- fi
- fi
- rm -f ${desc}
- done
-fi
-
-if [ ${total} -gt 0 ] ; then
- echo "---------------------------------------------"
- echo "Updated the copyright year on a total of ${total} files."
- if [ ${previous} -eq 0 ] ; then
- echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
- else
- echo "WARNING: This repository contained previously edited working set files."
- fi
- echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
+if [ "x$modified_files_origin" != "x" ]; then
+ cat $modified_files_origin | updateFiles
else
- echo "---------------------------------------------"
- echo "No files were changed"
- if [ ${previous} -ne 0 ] ; then
- echo "WARNING: This repository contained previously edited working set files."
- fi
- echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
+ # Get all changesets this year
+ if [ "$full_year" = "true" ]; then
+ vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
+ else
+ vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
+ fi
+ vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
+ vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
+
+ all_changesets=${tmp}/all_changesets
+ rm -f ${all_changesets}
+ "${vcs_list_changesets[@]}" > ${all_changesets}
+
+ # Check changeset to see if it is Copyright only changes, filter changesets
+ if [ -s ${all_changesets} ] ; then
+ echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
+ index=0
+ cat ${all_changesets} | while read changeset ; do
+ index=`expr ${index} '+' 1`
+ desc=${tmp}/desc.${changeset}
+ rm -f ${desc}
+ echo "------------------------------------------------"
+ "${vcs_changeset_message[@]}" "${changeset}" > ${desc}
+ printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
+ if [ "${year}" = "2010" ] ; then
+ if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
+ printf " EXCLUDED tag changeset.\n"
+ elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
+ printf " EXCLUDED rebrand changeset.\n"
+ elif cat ${desc} | grep -i -F copyright > /dev/null ; then
+ printf " EXCLUDED copyright changeset.\n"
+ else
+ updateChangesetFiles ${changeset}
+ fi
+ else
+ if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
+ printf " EXCLUDED tag changeset.\n"
+ elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
+ printf " EXCLUDED copyright year changeset.\n"
+ else
+ updateChangesetFiles ${changeset}
+ fi
+ fi
+ rm -f ${desc}
+ done
+ fi
+
+ if [ ${total} -gt 0 ] ; then
+ echo "---------------------------------------------"
+ echo "Updated the copyright year on a total of ${total} files."
+ if [ ${previous} -eq 0 ] ; then
+ echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
+ else
+ echo "WARNING: This repository contained previously edited working set files."
+ fi
+ echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
+ else
+ echo "---------------------------------------------"
+ echo "No files were changed"
+ if [ ${previous} -ne 0 ] ; then
+ echo "WARNING: This repository contained previously edited working set files."
+ fi
+ echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
+ fi
fi
# Cleanup
diff --git a/doc/building.html b/doc/building.html
index 8e5a7625371..534888ef667 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -1385,10 +1385,9 @@ dpkg-deb -x /tmp/libasound2-dev_1.0.25-4_armhf.deb .
can specify it by --with-alsa.
X11
-You will need X11 libraries suitable for your target system.
-In most cases, using Debian's pre-built libraries work fine.
-Note that X11 is needed even if you only want to build a headless
-JDK.
+When not building a headless JDK, you will need X11 libraries
+suitable for your target system. In most cases, using Debian's
+pre-built libraries work fine.
Go to Debian
Package Search , search for the following packages for your
diff --git a/doc/building.md b/doc/building.md
index b626027f101..d653d36eb55 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -1178,10 +1178,8 @@ Note that alsa is needed even if you only want to build a headless JDK.
#### X11
-You will need X11 libraries suitable for your *target* system. In most cases,
-using Debian's pre-built libraries work fine.
-
-Note that X11 is needed even if you only want to build a headless JDK.
+When not building a headless JDK, you will need X11 libraries suitable for your
+*target* system. In most cases, using Debian's pre-built libraries work fine.
* Go to [Debian Package Search](https://www.debian.org/distrib/packages),
search for the following packages for your *target* system, and download them
diff --git a/doc/hotspot-style.html b/doc/hotspot-style.html
index 362245cd00a..c7126622c7d 100644
--- a/doc/hotspot-style.html
+++ b/doc/hotspot-style.html
@@ -965,9 +965,8 @@ rather than NULL. See the paper for reasons to avoid
NULL.
Don't use (constant expression or literal) 0 for pointers. Note that
C++14 removed non-literal 0 constants from null pointer
-constants , though some compilers continue to treat them as such.
-For historical reasons there may be lingering uses of 0 as a
-pointer.
+constants, though some compilers continue to treat them as
+such.
<atomic>
Do not use facilities provided by the <atomic>
header (Notes for Specific Tests
Non-US
locale
PKCS11 Tests
+SCTP Tests
Testing Ahead-of-time
Optimizations
@@ -621,6 +622,21 @@ element of the appropriate @Artifact class. (See
JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+SCTP Tests
+The SCTP tests require the SCTP runtime library, which is often not
+installed by default in popular Linux distributions. Without this
+library, the SCTP tests will be skipped. If you want to enable the SCTP
+tests, you should install the SCTP library before running the tests.
+For distributions using the .deb packaging format and the apt tool
+(such as Debian, Ubuntu, etc.), try this:
+ sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora, Red Hat, etc.), try this:
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
Testing Ahead-of-time
Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations
diff --git a/doc/testing.md b/doc/testing.md
index b95f59de9fd..d0e54aab02b 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+
+### SCTP Tests
+
+The SCTP tests require the SCTP runtime library, which is often not installed
+by default in popular Linux distributions. Without this library, the SCTP tests
+will be skipped. If you want to enable the SCTP tests, you should install the
+SCTP library before running the tests.
+
+For distributions using the .deb packaging format and the apt tool
+(such as Debian, Ubuntu, etc.), try this:
+
+```
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora, Red Hat, etc.), try this:
+
+```
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in
diff --git a/make/PreInit.gmk b/make/PreInit.gmk
index 3df44308dd9..8152587781c 100644
--- a/make/PreInit.gmk
+++ b/make/PreInit.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,8 @@ CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))
ifeq ($(CALLED_SPEC_TARGETS), )
SKIP_SPEC := true
endif
-ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
+MFLAGS_SINGLE := $(filter-out --%, $(MFLAGS))
+ifeq ($(findstring p, $(MFLAGS_SINGLE))$(findstring q, $(MFLAGS_SINGLE)), pq)
SKIP_SPEC := true
endif
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 946b1332edc..02ea632e3ec 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -972,6 +972,10 @@ define SetupRunJtregTestBody
JTREG_AUTO_PROBLEM_LISTS += ProblemList-enable-preview.txt
endif
+ ifneq ($$(findstring -XX:+UseCompactObjectHeaders, $$(JTREG_ALL_OPTIONS)), )
+ JTREG_AUTO_PROBLEM_LISTS += ProblemList-coh.txt
+ endif
+
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
# Accept both absolute paths as well as relative to the current test root.
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 5a9fdc57c74..2d39d84f52e 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
- UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
- DEFAULT: "",
- RESULT: DEBUG_SYMBOLS_LEVEL,
+ UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
+ DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
+ CHECK_AVAILABLE: [
+ if test x$TOOLCHAIN_TYPE = xmicrosoft; then
+ AVAILABLE=false
+ fi
+ ],
DESC: [set the native debug symbol level (GCC and Clang only)],
- DEFAULT_DESC: [toolchain default])
- AC_SUBST(DEBUG_SYMBOLS_LEVEL)
-
- if test "x${TOOLCHAIN_TYPE}" = xgcc || \
- test "x${TOOLCHAIN_TYPE}" = xclang; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
- if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
- FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
- IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
- fi
- fi
+ DEFAULT_DESC: [toolchain default],
+ IF_AUTO: [
+ RESULT=""
+ ])
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
@@ -213,8 +209,12 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-wd"
CFLAGS_WARNINGS_ARE_ERRORS="-WX"
- WARNINGS_ENABLE_ALL="-W3"
+ WARNINGS_ENABLE_ALL_NORMAL="-W3"
+ WARNINGS_ENABLE_ADDITIONAL=""
+ WARNINGS_ENABLE_ADDITIONAL_CXX=""
+ WARNINGS_ENABLE_ADDITIONAL_JVM=""
DISABLED_WARNINGS="4800 5105"
+ CFLAGS_CONVERSION_WARNINGS=
;;
gcc)
@@ -222,14 +222,16 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-Wno-"
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
+ WARNINGS_ENABLE_ALL_NORMAL="-Wall -Wextra"
+
# Additional warnings that are not activated by -Wall and -Wextra
- WARNINGS_ENABLE_ADDITIONAL="-Winvalid-pch -Wpointer-arith -Wreturn-type \
+ WARNINGS_ENABLE_ADDITIONAL="-Wformat=2 \
+ -Winvalid-pch -Wpointer-arith -Wreturn-type \
-Wsign-compare -Wtrampolines -Wtype-limits -Wundef -Wuninitialized \
-Wunused-const-variable=1 -Wunused-function -Wunused-result \
-Wunused-value"
WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
- WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
- WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
+ WARNINGS_ENABLE_ADDITIONAL_JVM="-Wzero-as-null-pointer-constant"
# These warnings will never be turned on, since they generate too many
# false positives.
@@ -238,6 +240,7 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
if test "x$OPENJDK_TARGET_CPU_ARCH" = "xppc"; then
DISABLED_WARNINGS="$DISABLED_WARNINGS psabi"
fi
+ CFLAGS_CONVERSION_WARNINGS="-Wconversion -Wno-float-conversion"
;;
clang)
@@ -245,22 +248,32 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
BUILD_CC_DISABLE_WARNING_PREFIX="-Wno-"
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
+ WARNINGS_ENABLE_ALL_NORMAL="-Wall -Wextra"
+
# Additional warnings that are not activated by -Wall and -Wextra
- WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wsign-compare -Wreorder \
+ WARNINGS_ENABLE_ADDITIONAL="-Wformat=2 \
+ -Wpointer-arith -Wsign-compare -Wreorder \
-Wunused-function -Wundef -Wunused-value -Woverloaded-virtual"
- WARNINGS_ENABLE_ALL="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
+ WARNINGS_ENABLE_ADDITIONAL_CXX=""
+ WARNINGS_ENABLE_ADDITIONAL_JVM="-Wzero-as-null-pointer-constant"
# These warnings will never be turned on, since they generate too many
# false positives.
DISABLED_WARNINGS="unknown-warning-option unused-parameter"
+ CFLAGS_CONVERSION_WARNINGS="-Wimplicit-int-conversion"
;;
esac
+ WARNINGS_ENABLE_ALL="$WARNINGS_ENABLE_ALL_NORMAL $WARNINGS_ENABLE_ADDITIONAL"
+ WARNINGS_ENABLE_ALL_CXX="$WARNINGS_ENABLE_ALL $WARNINGS_ENABLE_ADDITIONAL_CXX"
+ WARNINGS_ENABLE_ALL_JVM="$WARNINGS_ENABLE_ALL_CXX $WARNINGS_ENABLE_ADDITIONAL_JVM"
+
AC_SUBST(DISABLE_WARNING_PREFIX)
AC_SUBST(BUILD_CC_DISABLE_WARNING_PREFIX)
AC_SUBST(CFLAGS_WARNINGS_ARE_ERRORS)
AC_SUBST(DISABLED_WARNINGS)
AC_SUBST(DISABLED_WARNINGS_C)
AC_SUBST(DISABLED_WARNINGS_CXX)
+ AC_SUBST(CFLAGS_CONVERSION_WARNINGS)
])
AC_DEFUN([FLAGS_SETUP_QUALITY_CHECKS],
@@ -608,19 +621,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
ADLC_LANGSTD_CXXFLAGS="$LANGSTD_CXXFLAGS"
# CFLAGS WARNINGS STUFF
- # Set JVM_CFLAGS warning handling
- if test "x$TOOLCHAIN_TYPE" = xgcc; then
- WARNING_CFLAGS_JDK_CONLY="$WARNINGS_ENABLE_ALL_CFLAGS"
- WARNING_CFLAGS_JDK_CXXONLY="$WARNINGS_ENABLE_ALL_CXXFLAGS"
- WARNING_CFLAGS_JVM="$WARNINGS_ENABLE_ALL_CXXFLAGS"
-
- elif test "x$TOOLCHAIN_TYPE" = xclang; then
- WARNING_CFLAGS="$WARNINGS_ENABLE_ALL"
-
- elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
- WARNING_CFLAGS="$WARNINGS_ENABLE_ALL"
-
- fi
+ WARNING_CFLAGS_JDK_CONLY="$WARNINGS_ENABLE_ALL"
+ WARNING_CFLAGS_JDK_CXXONLY="$WARNINGS_ENABLE_ALL_CXX"
+ WARNING_CFLAGS_JVM="$WARNINGS_ENABLE_ALL_JVM"
# Set some additional per-OS defines.
@@ -882,12 +885,12 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
CFLAGS_JVM_COMMON="$ALWAYS_CFLAGS_JVM $ALWAYS_DEFINES_JVM \
$TOOLCHAIN_CFLAGS_JVM ${$1_TOOLCHAIN_CFLAGS_JVM} \
$OS_CFLAGS $OS_CFLAGS_JVM $CFLAGS_OS_DEF_JVM $DEBUG_CFLAGS_JVM \
- $WARNING_CFLAGS $WARNING_CFLAGS_JVM $JVM_PICFLAG $FILE_MACRO_CFLAGS \
+ $WARNING_CFLAGS_JVM $JVM_PICFLAG $FILE_MACRO_CFLAGS \
$REPRODUCIBLE_CFLAGS $BRANCH_PROTECTION_CFLAGS"
CFLAGS_JDK_COMMON="$ALWAYS_DEFINES_JDK $TOOLCHAIN_CFLAGS_JDK \
$OS_CFLAGS $CFLAGS_OS_DEF_JDK $DEBUG_CFLAGS_JDK $DEBUG_OPTIONS_FLAGS_JDK \
- $WARNING_CFLAGS $WARNING_CFLAGS_JDK $DEBUG_SYMBOLS_CFLAGS_JDK \
+ $DEBUG_SYMBOLS_CFLAGS_JDK \
$FILE_MACRO_CFLAGS $REPRODUCIBLE_CFLAGS $BRANCH_PROTECTION_CFLAGS"
# Use ${$2EXTRA_CFLAGS} to block EXTRA_CFLAGS to be added to build flags.
diff --git a/make/autoconf/lib-bundled.m4 b/make/autoconf/lib-bundled.m4
index a6266bec014..7aa5fdf2589 100644
--- a/make/autoconf/lib-bundled.m4
+++ b/make/autoconf/lib-bundled.m4
@@ -267,8 +267,8 @@ AC_DEFUN_ONCE([LIB_SETUP_ZLIB],
LIBZ_LIBS=""
if test "x$USE_EXTERNAL_LIBZ" = "xfalse"; then
LIBZ_CFLAGS="$LIBZ_CFLAGS -I$TOPDIR/src/java.base/share/native/libzip/zlib"
- if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- LIBZ_CFLAGS="$LIBZ_CFLAGS -DHAVE_UNISTD_H"
+ if test "x$OPENJDK_TARGET_OS" = xmacosx -o "x$OPENJDK_TARGET_OS" = xaix -o "x$OPENJDK_TARGET_OS" = xlinux; then
+ LIBZ_CFLAGS="$LIBZ_CFLAGS -DHAVE_UNISTD_H=1 -DHAVE_STDARG_H=1"
fi
else
LIBZ_LIBS="-lz"
diff --git a/make/autoconf/lib-tests.m4 b/make/autoconf/lib-tests.m4
index 31d48055984..faaf229eacd 100644
--- a/make/autoconf/lib-tests.m4
+++ b/make/autoconf/lib-tests.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
################################################################################
# Minimum supported versions
-JTREG_MINIMUM_VERSION=8.1
+JTREG_MINIMUM_VERSION=8.2.1
GTEST_MINIMUM_VERSION=1.14.0
################################################################################
diff --git a/make/autoconf/libraries.m4 b/make/autoconf/libraries.m4
index 8dc3d55ed0c..5daacdc1ced 100644
--- a/make/autoconf/libraries.m4
+++ b/make/autoconf/libraries.m4
@@ -42,12 +42,12 @@ m4_include([lib-tests.m4])
AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES],
[
# Check if X11 is needed
- if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # No X11 support on windows or macosx
+ if test "x$OPENJDK_TARGET_OS" = xwindows ||
+ test "x$OPENJDK_TARGET_OS" = xmacosx ||
+ test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
NEEDS_LIB_X11=false
else
- # All other instances need X11, even if building headless only, libawt still
- # needs X11 headers.
+ # All other instances need X11 for libawt.
NEEDS_LIB_X11=true
fi
diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4
index 1b247e159ac..90d5d795626 100644
--- a/make/autoconf/platform.m4
+++ b/make/autoconf/platform.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -311,6 +311,12 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
else
OPENJDK_BUILD_OS_ENV="$VAR_OS"
fi
+ # Special handling for MSYS2 that reports a Cygwin triplet as the default host triplet.
+ case `uname` in
+ MSYS*)
+ OPENJDK_BUILD_OS_ENV=windows.msys2
+ ;;
+ esac
OPENJDK_BUILD_CPU="$VAR_CPU"
OPENJDK_BUILD_CPU_ARCH="$VAR_CPU_ARCH"
OPENJDK_BUILD_CPU_BITS="$VAR_CPU_BITS"
diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template
index b3d58704c50..c4e5a23d31a 100644
--- a/make/autoconf/spec.gmk.template
+++ b/make/autoconf/spec.gmk.template
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -529,6 +529,7 @@ CFLAGS_WARNINGS_ARE_ERRORS := @CFLAGS_WARNINGS_ARE_ERRORS@
DISABLED_WARNINGS := @DISABLED_WARNINGS@
DISABLED_WARNINGS_C := @DISABLED_WARNINGS_C@
DISABLED_WARNINGS_CXX := @DISABLED_WARNINGS_CXX@
+CFLAGS_CONVERSION_WARNINGS := @CFLAGS_CONVERSION_WARNINGS@
# A global flag (true or false) determining if native warnings are considered errors.
WARNINGS_AS_ERRORS := @WARNINGS_AS_ERRORS@
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 97ef88932cb..45cc3c77dea 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -141,6 +141,66 @@ endef
# Make sure logging is setup for everyone that includes MakeBase.gmk.
$(eval $(call SetupLogging))
+################################################################################
+# Make does not have support for VARARGS, you can send variable amount
+# of arguments, but you can for example not append a list at the end.
+# It is therefore not easy to send the elements of a list of unknown
+# length as argument to a function. This can somewhat be worked around
+# by sending a list as an argument, and then interpreting each element
+# of the list as an argument to the function. However, Make is
+# limited, and using this method you can not easily send spaces.
+#
+# We need to quote strings for two reasons when sending them as
+# "variable append packs":
+#
+# 1) variable appends can include spaces, and those must be preserved
+# 2) variable appends can include assignment strings ":=", and those
+# must be quoted to a form so that we can recognise the "append pack".
+# We recognise an "append pack" by its lack of strict assignment ":="
+
+Q := $(HASH)
+SpaceQ := $(Q)s
+AppendQ := $(Q)+
+AssignQ := $(Q)a
+QQ := $(Q)$(Q)
+
+# $(call Quote,echo "#trala:=") -> echo#s"##trala#a"
+Quote = $(subst :=,$(AssignQ),$(subst $(SPACE),$(SpaceQ),$(subst $(Q),$(QQ),$1)))
+
+# $(call Unquote,echo#s"##trala#a") -> echo "#trala:="
+Unquote = $(subst $(QQ),$(Q),$(subst $(SpaceQ),$(SPACE),$(subst $(AssignQ),:=,$1)))
+
+# $(call QuoteAppend,name,some value) -> name#+some#svalue
+# $(call QuoteAppend,bad+=name,some value) -> error
+QuoteAppend = $(if $(findstring +=,$1),$(error you can not have += in a variable name: "$1"),$(call Quote,$1)$(AppendQ)$(call Quote,$2))
+
+# $(call UnquoteAppendIndex,name#+some#svalue,1) -> name
+# $(call UnquoteAppendIndex,name#+some#svalue,2) -> some value
+UnquoteAppendIndex = $(call Unquote,$(word $2,$(subst $(AppendQ),$(SPACE),$1)))
+
+# $(call FilterFiles,dir,%.cpp) -> file1.cpp file2.cpp (without path)
+FilterFiles = $(filter $2,$(notdir $(call FindFiles,$1)))
+
+# $(call Unpack module_,file1.cpp_CXXFLAGS#+-Wconversion file2.cpp_CXXFLAGS#+-Wconversion) -> module_file1.cpp_CXXFLAGS += -Wconversion
+# module_file2.cpp_CXXFLAGS += -Wconversion
+Unpack = $(foreach pair,$2,$1$(call UnquoteAppendIndex,$(pair),1) += $(call UnquoteAppendIndex,$(pair),2)$(NEWLINE))
+
+# This macro takes four arguments:
+# $1: directory where to find files (stripped), example: $(TOPDIR)/src/hotspot/share/gc/g1
+# $2: filter to match what to keep (stripped), example: g1Concurrent%.cpp
+# $3: what flags to override (stripped), example: _CXXFLAGS
+# $4: what value to append to the flag (stripped), example: $(CFLAGS_CONVERSION_WARNINGS)
+#
+# The result will be a quoted string that can be unpacked to a list of
+# variable appendings (see macro Unpack above). You do not need to take
+# care of unpacking, it is done in NamedParamsMacroTemplate.
+#
+# This feature should only be used for warnings that we want to
+# incrementally add to the rest of the code base.
+#
+# $(call ExtendFlags,dir,%.cpp,_CXXFLAGS,-Wconversion) -> file1.cpp_CXXFLAGS#+-Wconversion file2.cpp_CXXFLAGS#+-Wconversion
+ExtendFlags = $(foreach file,$(call FilterFiles,$(strip $1),$(strip $2)),$(call QuoteAppend,$(file)$(strip $3),$(strip $4)))
+
################################################################################
MAX_PARAMS := 96
@@ -166,7 +226,10 @@ define NamedParamsMacroTemplate
Too many named arguments to macro, please update MAX_PARAMS in MakeBase.gmk))
# Iterate over 2 3 4... and evaluate the named parameters with $1_ as prefix
$(foreach i, $(PARAM_SEQUENCE), $(if $(strip $($i)), \
- $(strip $1)_$(strip $(call EscapeHash, $(call DoubleDollar, $($i))))$(NEWLINE)))
+ $(if $(findstring :=,$($i)), \
+ $(strip $1)_$(strip $(call EscapeHash, $(call DoubleDollar, $($i))))$(NEWLINE), \
+ $(call Unpack,$(strip $1)_,$($i)))))
+
# Debug print all named parameter names and values
$(if $(findstring $(LOG_LEVEL), trace), \
$(info $0 $(strip $1) $(foreach i, $(PARAM_SEQUENCE), \
diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf
index bd73e909062..ebfc9191535 100644
--- a/make/conf/github-actions.conf
+++ b/make/conf/github-actions.conf
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)
GTEST_VERSION=1.14.0
-JTREG_VERSION=8.1+1
+JTREG_VERSION=8.2.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 93aeebc0dd6..76a94b7789e 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
- version: "8.1",
+ version: "8.2.1",
build_number: "1",
- file: "bundles/jtreg-8.1+1.zip",
+ file: "bundles/jtreg-8.2.1+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
diff --git a/make/devkit/Tools.gmk b/make/devkit/Tools.gmk
index db77f2f3f74..74c6d861777 100644
--- a/make/devkit/Tools.gmk
+++ b/make/devkit/Tools.gmk
@@ -78,7 +78,7 @@ else ifeq ($(BASE_OS), Fedora)
endif
BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
else
- LATEST_ARCHIVED_OS_VERSION := 36
+ LATEST_ARCHIVED_OS_VERSION := 41
ifeq ($(filter aarch64 armhfp x86_64, $(ARCH)), )
FEDORA_TYPE := fedora-secondary
else
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index 60912992134..327014b1e9d 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
- DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
+ DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
+ zero-as-null-pointer-constant, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index 39a549b7db0..f41693e05fb 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -105,7 +105,7 @@ DISABLED_WARNINGS_gcc := array-bounds comment delete-non-virtual-dtor \
DISABLED_WARNINGS_clang := delete-non-abstract-non-virtual-dtor \
invalid-offsetof missing-braces \
sometimes-uninitialized unknown-pragmas unused-but-set-variable \
- unused-function unused-local-typedef unused-private-field unused-variable
+ unused-local-typedef unused-private-field unused-variable
ifneq ($(DEBUG_LEVEL), release)
# Assert macro gives warning
@@ -190,6 +190,8 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
whitebox.cpp_CXXFLAGS := $(CFLAGS_SHIP_DEBUGINFO), \
+ $(call ExtendFlags, $(TOPDIR)/src/hotspot/share/gc/g1, \
+ g1Numa.cpp, _CXXFLAGS, $(CFLAGS_CONVERSION_WARNINGS)), \
DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
DISABLED_WARNINGS_gcc_ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp := nonnull, \
DISABLED_WARNINGS_gcc_bytecodeInterpreter.cpp := unused-label, \
diff --git a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
index 55dd6a8d6ad..ab878a4d2a5 100644
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -293,8 +293,10 @@ public class CLDRConverter {
bundleGenerator = new ResourceBundleGenerator();
// Parse data independent of locales
- parseSupplemental();
+ // parseBCP47() must precede parseSupplemental(). The latter depends
+ // on IANA alias map, which is produced by the former.
parseBCP47();
+ parseSupplemental();
// rules maps
pluralRules = generateRules(handlerPlurals);
@@ -536,6 +538,12 @@ public class CLDRConverter {
// canonical tz name map
// alias -> primary
+ //
+ // Note that CLDR meta zones do not necessarily align with IANA's
+ // current time zone identifiers. For example, the CLDR "India"
+ // meta zone maps to "Asia/Calcutta", whereas IANA now uses
+ // "Asia/Kolkata" for the zone. Accordingly, "canonical" here is
+ // defined in terms of CLDR's zone mappings.
handlerTimeZone.getData().forEach((k, v) -> {
String[] ids = ((String)v).split("\\s");
for (int i = 1; i < ids.length; i++) {
diff --git a/make/jdk/src/classes/build/tools/cldrconverter/TimeZoneParseHandler.java b/make/jdk/src/classes/build/tools/cldrconverter/TimeZoneParseHandler.java
index 66e94e5ca06..8203a9f0b91 100644
--- a/make/jdk/src/classes/build/tools/cldrconverter/TimeZoneParseHandler.java
+++ b/make/jdk/src/classes/build/tools/cldrconverter/TimeZoneParseHandler.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@ package build.tools.cldrconverter;
import java.io.File;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
@@ -40,6 +43,10 @@ import org.xml.sax.SAXException;
class TimeZoneParseHandler extends AbstractLDMLHandler {
private static final String PREF_PREFIX = "preferred:";
+ // CLDR aliases to IANA ids map. The initial capacity is estimated
+ // from the number of aliases in timezone.xml as of CLDR v48
+ private final Map ianaAliasMap = HashMap.newHashMap(32);
+
@Override
public InputSource resolveEntity(String publicID, String systemID) throws IOException, SAXException {
// avoid HTTP traffic to unicode.org
@@ -61,7 +68,16 @@ class TimeZoneParseHandler extends AbstractLDMLHandler {
put(attributes.getValue("name"), PREF_PREFIX + preferred);
}
} else {
- put(attributes.getValue("name"), attributes.getValue("alias"));
+ var alias = attributes.getValue("alias");
+ var iana = attributes.getValue("iana");
+ if (iana != null) {
+ for (var a : alias.split("\\s+")) {
+ if (!a.equals(iana)) {
+ ianaAliasMap.put(a, iana);
+ }
+ }
+ }
+ put(attributes.getValue("name"), alias);
}
}
break;
@@ -80,4 +96,8 @@ class TimeZoneParseHandler extends AbstractLDMLHandler {
.forEach(e -> map.put(e.getKey(),
map.get(e.getValue().toString().substring(PREF_PREFIX.length()))));
}
+
+ Map getIanaAliasMap() {
+ return ianaAliasMap;
+ }
}
diff --git a/make/jdk/src/classes/build/tools/cldrconverter/WinZonesParseHandler.java b/make/jdk/src/classes/build/tools/cldrconverter/WinZonesParseHandler.java
index a584358f0cb..343e143b6ad 100644
--- a/make/jdk/src/classes/build/tools/cldrconverter/WinZonesParseHandler.java
+++ b/make/jdk/src/classes/build/tools/cldrconverter/WinZonesParseHandler.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,7 @@ class WinZonesParseHandler extends AbstractLDMLHandler {
String zoneName = attributes.getValue("other");
String territory = attributes.getValue("territory");
String javatz = attributes.getValue("type").replaceFirst("\\s.*", "");
+ javatz = CLDRConverter.handlerTimeZone.getIanaAliasMap().getOrDefault(javatz, javatz);
put(zoneName + ":" + territory, javatz);
pushIgnoredContainer(qName);
break;
diff --git a/make/jdk/src/classes/build/tools/taglet/JSpec.java b/make/jdk/src/classes/build/tools/taglet/JSpec.java
index 1c301e94a8a..7e1e0ca215e 100644
--- a/make/jdk/src/classes/build/tools/taglet/JSpec.java
+++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
*
- * @jls section-number description
+ * @jls chapter.section description
+ * @jls preview-feature-chapter.section description
*
*
* For example:
*
*
* @jls 3.4 Line Terminators
+ * @jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
*
*
* will produce the following HTML, depending on the file containing
@@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* {@code
* See Java Language Specification :
* 3.4 Line terminators
+ *
+ * 5.7.1 Exact Testing Conversions
+ * PREVIEW
* }
*
- * Copies of JLS and JVMS are expected to have been placed in the {@code specs}
- * folder. These documents are not included in open-source repositories.
+ * In inline tags (note you need manual JLS/JVMS prefix):
+ *
+ * JLS {@jls 3.4}
+ *
+ *
+ * produces (note the section sign and no trailing dot):
+ *
+ * JLS §3.4
+ *
+ *
+ * Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
+ * been placed in the {@code specs} folder. These documents are not included
+ * in open-source repositories.
*/
public class JSpec implements Taglet {
@@ -87,9 +103,9 @@ public class JSpec implements Taglet {
}
}
- private String tagName;
- private String specTitle;
- private String idPrefix;
+ private final String tagName;
+ private final String specTitle;
+ private final String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@@ -98,7 +114,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
- private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?[1-9][0-9]*)(?[0-9a-z_.]*)( .*)?$");
+ private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?([a-z0-9]+-)+)?(?[1-9][0-9]*)(?[0-9a-z_.]*)( .*)?$");
/**
* Returns the set of locations in which the tag may be used.
@@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
+ // preview-feature-4.6 is preview-feature-, 4, .6
+ String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
- String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
- rootParent, idPrefix, chapter, section);
+ String url = preview == null ?
+ String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section) :
+ String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section, preview);
+
+ var literal = expand(contents).trim();
+ var prefix = (preview == null ? "" : preview) + chapter + section;
+ if (literal.startsWith(prefix)) {
+ var hasFullTitle = literal.length() > prefix.length();
+ if (hasFullTitle) {
+ // Drop the preview identifier
+ literal = chapter + section + literal.substring(prefix.length());
+ } else {
+ // No section sign if the tag refers to a chapter, like {@jvms 4}
+ String sectionSign = section.isEmpty() ? "" : "§";
+ // Change whole text to "§chapter.x" in inline tags.
+ literal = sectionSign + chapter + section;
+ }
+ }
sb.append("")
- .append(expand(contents))
+ .append(literal)
.append(" ");
+ if (preview != null) {
+ // Add PREVIEW superscript that links to JLS/JVMS 1.5.1
+ // "Restrictions on the Use of Preview Features"
+ // Similar to how APIs link to the Preview info box warning
+ var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, "1", ".5.1");
+ sb.append("PREVIEW ");
+ }
+
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append(" ");
}
diff --git a/make/modules/java.base/Copy.gmk b/make/modules/java.base/Copy.gmk
index b8c1f2c05fa..43b9db651e0 100644
--- a/make/modules/java.base/Copy.gmk
+++ b/make/modules/java.base/Copy.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,10 @@ ifeq ($(USE_EXTERNAL_LIBZ), true)
LEGAL_EXCLUDES += zlib.md
endif
+ifneq ($(TOOLCHAIN_TYPE), gcc)
+ LEGAL_EXCLUDES += gcc.md
+endif
+
$(eval $(call SetupCopyLegalFiles, COPY_LEGAL, \
EXCLUDES := $(LEGAL_EXCLUDES), \
))
diff --git a/make/modules/java.desktop/lib/AwtLibraries.gmk b/make/modules/java.desktop/lib/AwtLibraries.gmk
index 463e09e12dc..8b6b50b9e62 100644
--- a/make/modules/java.desktop/lib/AwtLibraries.gmk
+++ b/make/modules/java.desktop/lib/AwtLibraries.gmk
@@ -88,6 +88,10 @@ LIBAWT_EXTRA_HEADER_DIRS := \
LIBAWT_CFLAGS := -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES -DMLIB_NO_LIBSUNMATH
+ifeq ($(ENABLE_HEADLESS_ONLY), true)
+ LIBAWT_CFLAGS += -DHEADLESS
+endif
+
ifeq ($(call isTargetOs, windows), true)
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE -DMLIB_OS64BIT
LIBAWT_RCFLAGS ?= -I$(TOPDIR)/src/java.base/windows/native/launcher/icons
@@ -167,11 +171,18 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
+ LIBAWT_HEADLESS_EXCLUDE_FILES := \
+ GLXGraphicsConfig.c \
+ GLXSurfaceData.c \
+ X11PMBlitLoops.c \
+ X11Renderer.c \
+ X11SurfaceData.c \
+ #
+
LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
$(LIBAWT_DEFAULT_HEADER_DIRS) \
common/awt/debug \
common/font \
- common/java2d/opengl \
java.base:libjvm \
#
@@ -191,7 +202,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
- EXCLUDES := medialib, \
+ EXCLUDES := medialib opengl, \
+ EXCLUDE_FILES := $(LIBAWT_HEADLESS_EXCLUDE_FILES), \
ONLY_EXPORTED := $(LIBAWT_HEADLESS_ONLY_EXPORTED), \
OPTIMIZATION := LOW, \
CFLAGS := -DHEADLESS=true $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) \
diff --git a/make/modules/jdk.jpackage/Lib.gmk b/make/modules/jdk.jpackage/Lib.gmk
index 704436bbde6..86b11bdafee 100644
--- a/make/modules/jdk.jpackage/Lib.gmk
+++ b/make/modules/jdk.jpackage/Lib.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@ $(eval $(call SetupJdkExecutable, BUILD_JPACKAGEAPPLAUNCHER, \
SRC := applauncher, \
EXTRA_SRC := common, \
INCLUDE_FILES := $(JPACKAGEAPPLAUNCHER_INCLUDE_FILES), \
- OPTIMIZATION := LOW, \
+ OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_LinuxPackage.c := format-nonliteral, \
DISABLED_WARNINGS_clang_Log.cpp := unused-const-variable, \
@@ -91,7 +91,7 @@ ifeq ($(call isTargetOs, linux), true)
common, \
EXCLUDE_FILES := LinuxLauncher.c LinuxPackage.c, \
LINK_TYPE := C++, \
- OPTIMIZATION := LOW, \
+ OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_gcc_Log.cpp := unused-const-variable, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_tstrings.cpp := format-nonliteral, \
diff --git a/make/modules/jdk.security.auth/Lib.gmk b/make/modules/jdk.security.auth/Lib.gmk
index 9ead32dbe12..96d609f08d6 100644
--- a/make/modules/jdk.security.auth/Lib.gmk
+++ b/make/modules/jdk.security.auth/Lib.gmk
@@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################
-$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
- NAME := jaas, \
- OPTIMIZATION := LOW, \
- EXTRA_HEADER_DIRS := java.base:libjava, \
- LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
-))
-
-TARGETS += $(BUILD_LIBJAAS)
+ifeq ($(call isTargetOs, windows), true)
+ $(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
+ NAME := jaas, \
+ OPTIMIZATION := LOW, \
+ EXTRA_HEADER_DIRS := java.base:libjava, \
+ LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
+ ))
+ TARGETS += $(BUILD_LIBJAAS)
+endif
################################################################################
diff --git a/src/demo/share/jfc/TableExample/OldJTable.java b/src/demo/share/jfc/TableExample/OldJTable.java
deleted file mode 100644
index 8c77978fe8a..00000000000
--- a/src/demo/share/jfc/TableExample/OldJTable.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * - Neither the name of Oracle nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * This source code is provided to illustrate the usage of a given feature
- * or technique and has been deliberately simplified. Additional steps
- * required for a production-quality application, such as security checks,
- * input validation and proper error handling, might not be present in
- * this sample code.
- */
-
-
-
-import java.util.EventObject;
-import java.util.List;
-import javax.swing.JTable;
-import javax.swing.table.DefaultTableModel;
-import javax.swing.table.TableCellEditor;
-import javax.swing.table.TableCellRenderer;
-import javax.swing.table.TableColumn;
-
-
-/**
- * The OldJTable is an unsupported class containing some methods that were
- * deleted from the JTable between releases 0.6 and 0.7
- */
-@SuppressWarnings("serial")
-public class OldJTable extends JTable
-{
- /*
- * A new convenience method returning the index of the column in the
- * co-ordinate space of the view.
- */
- public int getColumnIndex(Object identifier) {
- return getColumnModel().getColumnIndex(identifier);
- }
-
-//
-// Methods deleted from the JTable because they only work with the
-// DefaultTableModel.
-//
-
- public TableColumn addColumn(Object columnIdentifier, int width) {
- return addColumn(columnIdentifier, width, null, null, null);
- }
-
- public TableColumn addColumn(Object columnIdentifier, List> columnData) {
- return addColumn(columnIdentifier, -1, null, null, columnData);
- }
-
- // Override the new JTable implementation - it will not add a column to the
- // DefaultTableModel.
- public TableColumn addColumn(Object columnIdentifier, int width,
- TableCellRenderer renderer,
- TableCellEditor editor) {
- return addColumn(columnIdentifier, width, renderer, editor, null);
- }
-
- public TableColumn addColumn(Object columnIdentifier, int width,
- TableCellRenderer renderer,
- TableCellEditor editor, List> columnData) {
- checkDefaultTableModel();
-
- // Set up the model side first
- DefaultTableModel m = (DefaultTableModel)getModel();
- m.addColumn(columnIdentifier, columnData.toArray());
-
- // The column will have been added to the end, so the index of the
- // column in the model is the last element.
- TableColumn newColumn = new TableColumn(
- m.getColumnCount()-1, width, renderer, editor);
- super.addColumn(newColumn);
- return newColumn;
- }
-
- // Not possilble to make this work the same way ... change it so that
- // it does not delete columns from the model.
- public void removeColumn(Object columnIdentifier) {
- super.removeColumn(getColumn(columnIdentifier));
- }
-
- public void addRow(Object[] rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).addRow(rowData);
- }
-
- public void addRow(List> rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).addRow(rowData.toArray());
- }
-
- public void removeRow(int rowIndex) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).removeRow(rowIndex);
- }
-
- public void moveRow(int startIndex, int endIndex, int toIndex) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
- }
-
- public void insertRow(int rowIndex, Object[] rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
- }
-
- public void insertRow(int rowIndex, List> rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
- }
-
- public void setNumRows(int newSize) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setNumRows(newSize);
- }
-
- public void setDataVector(Object[][] newData, List> columnIds) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setDataVector(
- newData, columnIds.toArray());
- }
-
- public void setDataVector(Object[][] newData, Object[] columnIds) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
- }
-
- protected void checkDefaultTableModel() {
- if(!(dataModel instanceof DefaultTableModel))
- throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
- }
-
-//
-// Methods removed from JTable in the move from identifiers to ints.
-//
-
- public Object getValueAt(Object columnIdentifier, int rowIndex) {
- return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
- return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
- super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public boolean editColumnRow(Object identifier, int row) {
- return super.editCellAt(row, getColumnIndex(identifier));
- }
-
- public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
- moveColumn(getColumnIndex(columnIdentifier),
- getColumnIndex(targetColumnIdentifier));
- }
-
- public boolean isColumnSelected(Object identifier) {
- return isColumnSelected(getColumnIndex(identifier));
- }
-
- public TableColumn addColumn(int modelColumn, int width) {
- return addColumn(modelColumn, width, null, null);
- }
-
- public TableColumn addColumn(int modelColumn) {
- return addColumn(modelColumn, 75, null, null);
- }
-
- /**
- * Creates a new column with modelColumn , width ,
- * renderer , and editor and adds it to the end of
- * the JTable's array of columns. This method also retrieves the
- * name of the column using the model's getColumnName(modelColumn)
- * method, and sets the both the header value and the identifier
- * for this TableColumn accordingly.
- *
- * The modelColumn is the index of the column in the model which
- * will supply the data for this column in the table. This, like the
- * columnIdentifier in previous releases, does not change as the
- * columns are moved in the view.
- *
- * For the rest of the JTable API, and all of its associated classes,
- * columns are referred to in the co-ordinate system of the view, the
- * index of the column in the model is kept inside the TableColumn
- * and is used only to retrieve the information from the appropraite
- * column in the model.
- *
- *
- * @param modelColumn The index of the column in the model
- * @param width The new column's width. Or -1 to use
- * the default width
- * @param renderer The renderer used with the new column.
- * Or null to use the default renderer.
- * @param editor The editor used with the new column.
- * Or null to use the default editor.
- */
- public TableColumn addColumn(int modelColumn, int width,
- TableCellRenderer renderer,
- TableCellEditor editor) {
- TableColumn newColumn = new TableColumn(
- modelColumn, width, renderer, editor);
- addColumn(newColumn);
- return newColumn;
- }
-
-//
-// Methods that had their arguments switched.
-//
-
-// These won't work with the new table package.
-
-/*
- public Object getValueAt(int columnIndex, int rowIndex) {
- return super.getValueAt(rowIndex, columnIndex);
- }
-
- public boolean isCellEditable(int columnIndex, int rowIndex) {
- return super.isCellEditable(rowIndex, columnIndex);
- }
-
- public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
- super.setValueAt(aValue, rowIndex, columnIndex);
- }
-*/
-
- public boolean editColumnRow(int columnIndex, int rowIndex) {
- return super.editCellAt(rowIndex, columnIndex);
- }
-
- public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
- return super.editCellAt(rowIndex, columnIndex, e);
- }
-
-
-} // End Of Class OldJTable
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index b9252cc56ff..9734c6845ea 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
- // Convert BootTest condition to Assembler condition.
+ // Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
-// Convert BootTest condition to Assembler condition.
+// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;
@@ -3403,11 +3403,13 @@ encode %{
} else if (rtype == relocInfo::metadata_type) {
__ mov_metadata(dst_reg, (Metadata*)con);
} else {
- assert(rtype == relocInfo::none, "unexpected reloc type");
+ assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
+ // load fake address constants using a normal move
if (! __ is_valid_AArch64_address(con) ||
con < (address)(uintptr_t)os::vm_page_size()) {
__ mov(dst_reg, con);
} else {
+ // no reloc so just use adrp and add
uint64_t offset;
__ adrp(dst_reg, con, offset);
__ add(dst_reg, dst_reg, offset);
@@ -4535,6 +4537,18 @@ operand immP_1()
interface(CONST_INTER);
%}
+// AOT Runtime Constants Address
+operand immAOTRuntimeConstantsAddress()
+%{
+ // Check if the address is in the range of AOT Runtime Constants
+ predicate(AOTRuntimeConstants::contains((address)(n->get_ptr())));
+ match(ConP);
+
+ op_cost(0);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
// Float and Double operands
// Double Immediate
operand immD()
@@ -6898,6 +6912,20 @@ instruct loadConP1(iRegPNoSp dst, immP_1 con)
ins_pipe(ialu_imm);
%}
+instruct loadAOTRCAddress(iRegPNoSp dst, immAOTRuntimeConstantsAddress con)
+%{
+ match(Set dst con);
+
+ ins_cost(INSN_COST);
+ format %{ "adr $dst, $con\t# AOT Runtime Constants Address" %}
+
+ ins_encode %{
+ __ load_aotrc_address($dst$$Register, (address)$con$$constant);
+ %}
+
+ ins_pipe(ialu_imm);
+%}
+
// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad
index 78ef121bd29..19f03d97a72 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector.ad
+++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -201,6 +201,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
+ case Op_UMinReductionV:
+ case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@@ -383,6 +385,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
+ case Op_UMinReductionV:
+ case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@@ -4218,6 +4222,224 @@ instruct reduce_minD_masked(vRegD dst, vRegD dsrc, vReg vsrc, pRegGov pg) %{
ins_pipe(pipe_slow);
%}
+// -------------------- Vector reduction unsigned min/max ----------------------
+
+// reduction uminI
+
+instruct reduce_uminI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
+ vReg tmp, rFlagsReg cr) %{
+ predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
+ (Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_INT));
+ match(Set dst (UMinReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_uminI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ __ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ length_in_bytes, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_uminI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
+ (Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_INT));
+ match(Set dst (UMinReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_uminI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ assert(UseSVE > 0, "must be sve");
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ assert(length_in_bytes == MaxVectorSize, "invalid vector length");
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ ptrue, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// reduction uminL
+
+instruct reduce_uminL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
+ predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
+ match(Set dst (UMinReductionV isrc vsrc));
+ effect(TEMP_DEF dst, KILL cr);
+ format %{ "reduce_uminL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
+ ins_encode %{
+ __ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ /* vector_length_in_bytes */ 16, fnoreg);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_uminL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
+ match(Set dst (UMinReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_uminL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ assert(length_in_bytes == MaxVectorSize, "invalid vector length");
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ ptrue, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// reduction umin - predicated
+
+instruct reduce_uminI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 &&
+ (Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
+ match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_uminI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ $pg$$PRegister, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_uminL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
+ match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_uminL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ $pg$$PRegister, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// reduction umaxI
+
+instruct reduce_umaxI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
+ vReg tmp, rFlagsReg cr) %{
+ predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
+ (Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_INT));
+ match(Set dst (UMaxReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_umaxI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ __ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ length_in_bytes, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_umaxI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
+ (Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(2)) == T_INT));
+ match(Set dst (UMaxReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_umaxI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ assert(UseSVE > 0, "must be sve");
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ assert(length_in_bytes == MaxVectorSize, "invalid vector length");
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ ptrue, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// reduction umaxL
+
+instruct reduce_umaxL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
+ predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
+ match(Set dst (UMaxReductionV isrc vsrc));
+ effect(TEMP_DEF dst, KILL cr);
+ format %{ "reduce_umaxL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
+ ins_encode %{
+ __ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ /* vector_length_in_bytes */ 16, fnoreg);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_umaxL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
+ match(Set dst (UMaxReductionV isrc vsrc));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_umaxL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
+ assert(length_in_bytes == MaxVectorSize, "invalid vector length");
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ ptrue, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// reduction umax - predicated
+
+instruct reduce_umaxI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 &&
+ (Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
+ Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
+ Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
+ match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_umaxI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ $pg$$PRegister, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct reduce_umaxL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
+ vRegD tmp, rFlagsReg cr) %{
+ predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
+ match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
+ effect(TEMP_DEF dst, TEMP tmp, KILL cr);
+ format %{ "reduce_umaxL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
+ ins_encode %{
+ BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
+ __ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
+ $isrc$$Register, $vsrc$$FloatRegister,
+ $pg$$PRegister, $tmp$$FloatRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
index 66dc22c3758..48bffb3cf35 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -191,6 +191,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
+ case Op_UMinReductionV:
+ case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@@ -373,6 +375,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
+ case Op_UMinReductionV:
+ case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@@ -2505,6 +2509,32 @@ REDUCE_MAXMIN_INT_PREDICATE(min, L, iRegL, MinReductionV)
REDUCE_MAXMIN_FP_PREDICATE(min, F, fsrc, MinReductionV, sve_fminv, fmins)
REDUCE_MAXMIN_FP_PREDICATE(min, D, dsrc, MinReductionV, sve_fminv, fmind)
+// -------------------- Vector reduction unsigned min/max ----------------------
+
+// reduction uminI
+REDUCE_MAXMIN_I_NEON(umin, UMinReductionV)
+REDUCE_MAXMIN_I_SVE(umin, UMinReductionV)
+
+// reduction uminL
+REDUCE_MAXMIN_L_NEON(umin, UMinReductionV)
+REDUCE_MAXMIN_L_SVE(umin, UMinReductionV)
+
+// reduction umin - predicated
+REDUCE_MAXMIN_INT_PREDICATE(umin, I, iRegIorL2I, UMinReductionV)
+REDUCE_MAXMIN_INT_PREDICATE(umin, L, iRegL, UMinReductionV)
+
+// reduction umaxI
+REDUCE_MAXMIN_I_NEON(umax, UMaxReductionV)
+REDUCE_MAXMIN_I_SVE(umax, UMaxReductionV)
+
+// reduction umaxL
+REDUCE_MAXMIN_L_NEON(umax, UMaxReductionV)
+REDUCE_MAXMIN_L_SVE(umax, UMaxReductionV)
+
+// reduction umax - predicated
+REDUCE_MAXMIN_INT_PREDICATE(umax, I, iRegIorL2I, UMaxReductionV)
+REDUCE_MAXMIN_INT_PREDICATE(umax, L, iRegL, UMaxReductionV)
+
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index 18807c667e3..19b3bb1a65b 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2658,6 +2658,8 @@ template
INSN(uminv, 1, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(smaxp, 0, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sminp, 0, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
+ INSN(umaxp, 1, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
+ INSN(uminp, 1, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sqdmulh,0, 0b101101, false); // accepted arrangements: T4H, T8H, T2S, T4S
INSN(shsubv, 0, 0b001001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
@@ -3490,7 +3492,9 @@ public:
INSN(sve_sub, 0b00000100, 0b000001000); // vector sub
INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
INSN(sve_umax, 0b00000100, 0b001001000); // unsigned maximum vectors
+ INSN(sve_umaxv, 0b00000100, 0b001001001); // unsigned maximum reduction to scalar
INSN(sve_umin, 0b00000100, 0b001011000); // unsigned minimum vectors
+ INSN(sve_uminv, 0b00000100, 0b001011001); // unsigned minimum reduction to scalar
#undef INSN
// SVE floating-point arithmetic - predicate
@@ -4325,6 +4329,7 @@ public:
#undef INSN
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
}
// Stack overflow checking
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 37a6a130e0d..30048a2079d 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -33,6 +33,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
+#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
@@ -532,6 +533,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
+#if INCLUDE_CDS
+ if (AOTCodeCache::is_on_for_dump()) {
+ address b = c->as_pointer();
+ if (AOTRuntimeConstants::contains(b)) {
+ __ load_aotrc_address(dest->as_register_lo(), b);
+ break;
+ }
+ }
+#endif
__ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
break;
}
@@ -1218,43 +1228,11 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
-void LIR_Assembler::type_profile_helper(Register mdo,
- ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done) {
+void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv) {
- // Given a profile data offset, generate an Address which points to
- // the corresponding slot in mdo->data().
- // Clobbers rscratch2.
- auto slot_at = [=](ByteSize offset) -> Address {
- return __ form_address(rscratch2, mdo,
- md->byte_offset_of_slot(data, offset),
- LogBytesPerWord);
- };
-
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- // See if the receiver is receiver[n].
- __ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
- __ cmp(recv, rscratch1);
- __ br(Assembler::NE, next_test);
- __ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
- DataLayout::counter_increment);
- __ b(*update_done);
- __ bind(next_test);
- }
-
- // Didn't find receiver; find next empty slot and fill it in
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
- __ ldr(rscratch1, recv_addr);
- __ cbnz(rscratch1, next_test);
- __ str(recv, recv_addr);
- __ mov(rscratch1, DataLayout::counter_increment);
- __ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
- __ b(*update_done);
- __ bind(next_test);
- }
+ int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
+ __ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@@ -1316,14 +1294,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
- Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
- type_profile_helper(mdo, md, data, recv, &update_done);
- Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
- __ addptr(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
+ type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(obj, *obj_is_null);
}
@@ -1430,13 +1403,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
- Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
- type_profile_helper(mdo, md, data, recv, &update_done);
- Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
- __ addptr(counter_addr, DataLayout::counter_increment);
- __ bind(update_done);
+ type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(value, done);
}
@@ -2540,13 +2509,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
- // dynamic tests on the receiver type
-
- // NOTE: we should probably put a lock around this search to
- // avoid collisions by concurrent compilations
+ // dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
- uint i;
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
+ for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@@ -2554,36 +2519,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
-
- // Receiver type not found in profile data; select an empty slot
-
- // Note that this is less efficient than it should be because it
- // always does a write to the receiver part of the
- // VirtualCallData rather than just the first time
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
- ciKlass* receiver = vc_data->receiver(i);
- if (receiver == nullptr) {
- __ mov_metadata(rscratch1, known_klass->constant_encoding());
- Address recv_addr =
- __ form_address(rscratch2, mdo,
- md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
- LogBytesPerWord);
- __ str(rscratch1, recv_addr);
- Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
- __ addptr(data_addr, DataLayout::counter_increment);
- return;
- }
- }
+ // Receiver type was not found in the profile data.
+ // Fall back to the runtime helper to handle the remaining cases.
+ __ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
- Label update_done;
- type_profile_helper(mdo, md, data, recv, &update_done);
- // Receiver did not match any saved receiver and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- __ addptr(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
}
+ type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 21916a5f7dd..5af06fc6a1c 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -50,9 +50,8 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
- void type_profile_helper(Register mdo,
- ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done);
+ void type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index 75897a16fe4..dc0b9eb9546 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,9 @@
#include "opto/matcher.hpp"
#include "opto/output.hpp"
#include "opto/subnode.hpp"
+#include "runtime/objectMonitorTable.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -221,37 +223,52 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register t1,
if (!UseObjectMonitorTable) {
assert(t1_monitor == t1_mark, "should be the same here");
} else {
+ const Register t1_hash = t1;
Label monitor_found;
- // Load cache address
- lea(t3_t, Address(rthread, JavaThread::om_cache_oops_offset()));
+ // Save the mark; we might need it to extract the hash.
+ mov(t3, t1_mark);
- const int num_unrolled = 2;
+ // Look for the monitor in the om_cache.
+
+ ByteSize cache_offset = JavaThread::om_cache_oops_offset();
+ ByteSize monitor_offset = OMCache::oop_to_monitor_difference();
+ const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
- ldr(t1, Address(t3_t));
- cmp(obj, t1);
+ ldr(t1_monitor, Address(rthread, cache_offset + monitor_offset));
+ ldr(t2, Address(rthread, cache_offset));
+ cmp(obj, t2);
br(Assembler::EQ, monitor_found);
- increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
+ cache_offset = cache_offset + OMCache::oop_to_oop_difference();
}
- Label loop;
+ // Look for the monitor in the table.
- // Search for obj in cache.
- bind(loop);
+ // Get the hash code.
+ ubfx(t1_hash, t3, markWord::hash_shift, markWord::hash_bits);
- // Check for match.
- ldr(t1, Address(t3_t));
- cmp(obj, t1);
- br(Assembler::EQ, monitor_found);
+ // Get the table and calculate the bucket's address
+ lea(t3, ExternalAddress(ObjectMonitorTable::current_table_address()));
+ ldr(t3, Address(t3));
+ ldr(t2, Address(t3, ObjectMonitorTable::table_capacity_mask_offset()));
+ ands(t1_hash, t1_hash, t2);
+ ldr(t3, Address(t3, ObjectMonitorTable::table_buckets_offset()));
- // Search until null encountered, guaranteed _null_sentinel at end.
- increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
- cbnz(t1, loop);
- // Cache Miss, NE set from cmp above, cbnz does not set flags
- b(slow_path);
+ // Read the monitor from the bucket.
+ ldr(t1_monitor, Address(t3, t1_hash, Address::lsl(LogBytesPerWord)));
+
+ // Check if the monitor in the bucket is special (empty, tombstone or removed).
+ cmp(t1_monitor, (unsigned char)ObjectMonitorTable::SpecialPointerValues::below_is_special);
+ br(Assembler::LO, slow_path);
+
+ // Check if object matches.
+ ldr(t3, Address(t1_monitor, ObjectMonitor::object_offset()));
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs_asm->try_resolve_weak_handle_in_c2(this, t3, t2, slow_path);
+ cmp(t3, obj);
+ br(Assembler::NE, slow_path);
bind(monitor_found);
- ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
}
const Register t2_owner_addr = t2;
@@ -1960,50 +1977,76 @@ void C2_MacroAssembler::neon_reduce_logical(int opc, Register dst, BasicType bt,
BLOCK_COMMENT("} neon_reduce_logical");
}
-// Vector reduction min/max for integral type with ASIMD instructions.
+// Helper function to decode min/max reduction operation properties
+void C2_MacroAssembler::decode_minmax_reduction_opc(int opc, bool* is_min,
+ bool* is_unsigned,
+ Condition* cond) {
+ switch(opc) {
+ case Op_MinReductionV:
+ *is_min = true; *is_unsigned = false; *cond = LT; break;
+ case Op_MaxReductionV:
+ *is_min = false; *is_unsigned = false; *cond = GT; break;
+ case Op_UMinReductionV:
+ *is_min = true; *is_unsigned = true; *cond = LO; break;
+ case Op_UMaxReductionV:
+ *is_min = false; *is_unsigned = true; *cond = HI; break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+// Vector reduction min/max/umin/umax for integral type with ASIMD instructions.
// Note: vtmp is not used and expected to be fnoreg for T_LONG case.
// Clobbers: rscratch1, rflags
void C2_MacroAssembler::neon_reduce_minmax_integral(int opc, Register dst, BasicType bt,
Register isrc, FloatRegister vsrc,
unsigned vector_length_in_bytes,
FloatRegister vtmp) {
- assert(opc == Op_MinReductionV || opc == Op_MaxReductionV, "unsupported");
+ assert(opc == Op_MinReductionV || opc == Op_MaxReductionV ||
+ opc == Op_UMinReductionV || opc == Op_UMaxReductionV, "unsupported");
assert(vector_length_in_bytes == 8 || vector_length_in_bytes == 16, "unsupported");
assert(bt == T_BYTE || bt == T_SHORT || bt == T_INT || bt == T_LONG, "unsupported");
assert_different_registers(dst, isrc);
bool isQ = vector_length_in_bytes == 16;
- bool is_min = opc == Op_MinReductionV;
-
+ bool is_min;
+ bool is_unsigned;
+ Condition cond;
+ decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
BLOCK_COMMENT("neon_reduce_minmax_integral {");
if (bt == T_LONG) {
assert(vtmp == fnoreg, "should be");
assert(isQ, "should be");
umov(rscratch1, vsrc, D, 0);
cmp(isrc, rscratch1);
- csel(dst, isrc, rscratch1, is_min ? LT : GT);
+ csel(dst, isrc, rscratch1, cond);
umov(rscratch1, vsrc, D, 1);
cmp(dst, rscratch1);
- csel(dst, dst, rscratch1, is_min ? LT : GT);
+ csel(dst, dst, rscratch1, cond);
} else {
SIMD_Arrangement size = esize2arrangement((unsigned)type2aelembytes(bt), isQ);
if (size == T2S) {
- is_min ? sminp(vtmp, size, vsrc, vsrc) : smaxp(vtmp, size, vsrc, vsrc);
+ // For T2S (2x32-bit elements), use pairwise instructions because
+ // uminv/umaxv/sminv/smaxv don't support arrangement 2S.
+ neon_minmaxp(is_unsigned, is_min, vtmp, size, vsrc, vsrc);
} else {
- is_min ? sminv(vtmp, size, vsrc) : smaxv(vtmp, size, vsrc);
+ // For other sizes, use reduction to scalar instructions.
+ neon_minmaxv(is_unsigned, is_min, vtmp, size, vsrc);
}
if (bt == T_INT) {
umov(dst, vtmp, S, 0);
+ } else if (is_unsigned) {
+ umov(dst, vtmp, elemType_to_regVariant(bt), 0);
} else {
smov(dst, vtmp, elemType_to_regVariant(bt), 0);
}
cmpw(dst, isrc);
- cselw(dst, dst, isrc, is_min ? LT : GT);
+ cselw(dst, dst, isrc, cond);
}
BLOCK_COMMENT("} neon_reduce_minmax_integral");
}
// Vector reduction for integral type with SVE instruction.
-// Supported operations are Add, And, Or, Xor, Max, Min.
+// Supported operations are Add, And, Or, Xor, Max, Min, UMax, UMin.
// rflags would be clobbered if opc is Op_MaxReductionV or Op_MinReductionV.
void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt, Register src1,
FloatRegister src2, PRegister pg, FloatRegister tmp) {
@@ -2075,35 +2118,27 @@ void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt,
}
break;
}
- case Op_MaxReductionV: {
- sve_smaxv(tmp, size, pg, src2);
- if (bt == T_INT || bt == T_LONG) {
+ case Op_MaxReductionV:
+ case Op_MinReductionV:
+ case Op_UMaxReductionV:
+ case Op_UMinReductionV: {
+ bool is_min;
+ bool is_unsigned;
+ Condition cond;
+ decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
+ sve_minmaxv(is_unsigned, is_min, tmp, size, pg, src2);
+ // Move result from vector to general register
+ if (is_unsigned || bt == T_INT || bt == T_LONG) {
umov(dst, tmp, size, 0);
} else {
smov(dst, tmp, size, 0);
}
if (bt == T_LONG) {
cmp(dst, src1);
- csel(dst, dst, src1, Assembler::GT);
+ csel(dst, dst, src1, cond);
} else {
cmpw(dst, src1);
- cselw(dst, dst, src1, Assembler::GT);
- }
- break;
- }
- case Op_MinReductionV: {
- sve_sminv(tmp, size, pg, src2);
- if (bt == T_INT || bt == T_LONG) {
- umov(dst, tmp, size, 0);
- } else {
- smov(dst, tmp, size, 0);
- }
- if (bt == T_LONG) {
- cmp(dst, src1);
- csel(dst, dst, src1, Assembler::LT);
- } else {
- cmpw(dst, src1);
- cselw(dst, dst, src1, Assembler::LT);
+ cselw(dst, dst, src1, cond);
}
break;
}
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
index 412f0f37e9e..4f3a41da402 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,37 @@
void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
+ // Helper functions for min/max reduction operations
+
+ void decode_minmax_reduction_opc(int opc, bool* is_min, bool* is_unsigned, Condition* cond);
+
+ void neon_minmaxp(bool is_unsigned, bool is_min, FloatRegister dst,
+ SIMD_Arrangement size, FloatRegister src1, FloatRegister src2) {
+ auto m = is_unsigned ? (is_min ? &Assembler::uminp : &Assembler::umaxp)
+ : (is_min ? &Assembler::sminp : &Assembler::smaxp);
+ (this->*m)(dst, size, src1, src2);
+ }
+
+ // Typedefs used to disambiguate overloaded member functions.
+ typedef void (Assembler::*neon_reduction2)
+ (FloatRegister, SIMD_Arrangement, FloatRegister);
+
+ void neon_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
+ SIMD_Arrangement size, FloatRegister src) {
+ auto m = is_unsigned ? (is_min ? (neon_reduction2)&Assembler::uminv
+ : (neon_reduction2)&Assembler::umaxv)
+ : (is_min ? &Assembler::sminv
+ : &Assembler::smaxv);
+ (this->*m)(dst, size, src);
+ }
+
+ void sve_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
+ SIMD_RegVariant size, PRegister pg, FloatRegister src) {
+ auto m = is_unsigned ? (is_min ? &Assembler::sve_uminv : &Assembler::sve_umaxv)
+ : (is_min ? &Assembler::sve_sminv : &Assembler::sve_smaxv);
+ (this->*m)(dst, size, pg, src);
+ }
+
void select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, unsigned vector_length_in_bytes);
diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
index d7884c27a2c..68291720208 100644
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
@@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
+#include "code/aotCodeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
@@ -243,9 +244,25 @@ static void generate_post_barrier(MacroAssembler* masm,
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg, rscratch1);
// Does store cross heap regions?
- __ eor(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
- __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
- __ cbz(tmp1, done);
+ #if INCLUDE_CDS
+ // AOT code needs to load the barrier grain shift from the aot
+ // runtime constants area in the code cache otherwise we can compile
+ // it as an immediate operand
+ if (AOTCodeCache::is_on_for_dump()) {
+ address grain_shift_address = (address)AOTRuntimeConstants::grain_shift_address();
+ __ eor(tmp1, store_addr, new_val);
+ __ lea(tmp2, ExternalAddress(grain_shift_address));
+ __ ldrb(tmp2, tmp2);
+ __ lsrv(tmp1, tmp1, tmp2);
+ __ cbz(tmp1, done);
+ } else
+#endif
+ {
+ __ eor(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
+ __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
+ __ cbz(tmp1, done);
+ }
+
// Crosses regions, storing null?
if (new_val_may_be_null) {
__ cbz(new_val, done);
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
index 021af3e5698..2a78d688097 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -441,6 +441,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
+void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Load the oop from the weak handle.
+ __ ldr(obj, Address(obj));
+}
+
#undef __
#define __ _masm->
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
index e69be999f00..c2581b2f962 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,6 +135,7 @@ public:
OptoReg::Name opto_reg);
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
};
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
index 4d5ca01b6b4..3d5261c31d1 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
@@ -209,6 +209,10 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
bs_asm->increment_patching_epoch();
}
+ // Enable WXWrite: the function is called directly from nmethod_entry_barrier
+ // stub.
+ MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
+
NativeNMethodBarrier barrier(nm);
barrier.set_value(value, bit_mask);
}
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index 9a035d9f40e..2f7707227b4 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -85,26 +86,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
- }
-}
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -358,20 +349,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
- satb_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- rthread /* thread */,
- tmp1 /* tmp1 */,
- tmp2 /* tmp2 */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ rthread /* thread */,
+ tmp1 /* tmp1 */,
+ tmp2 /* tmp2 */,
+ true /* tosca_live */,
+ true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@@ -394,13 +385,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- if (!on_oop) {
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
- // flatten object address if needed
+ // Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@@ -409,20 +400,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
- shenandoah_write_barrier_pre(masm,
- tmp3 /* obj */,
- tmp2 /* pre_val */,
- rthread /* thread */,
- tmp1 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
+ bool storing_non_null = (val != noreg);
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp3 /* obj */,
+ tmp2 /* pre_val */,
+ rthread /* thread */,
+ tmp1 /* tmp */,
+ rscratch1 /* tmp2 */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
- if (needs_post_barrier) {
- store_check(masm, tmp3);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp3);
}
}
@@ -446,6 +443,30 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
+#ifdef COMPILER2
+void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
+ Register tmp, Label& slow_path) {
+ assert_different_registers(obj, tmp);
+
+ Label done;
+
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the reference is null, and if it is, take the fast path.
+ __ cbz(obj, done);
+
+ Address gc_state(rthread, ShenandoahThreadLocalData::gc_state_offset());
+ __ lea(tmp, gc_state);
+ __ ldrb(tmp, __ legitimize_address(gc_state, 1, tmp));
+
+ // Check if the heap is under weak-reference/roots processing, in
+ // which case we need to take the slow path.
+ __ tbnz(tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS, slow_path);
+ __ bind(done);
+}
+#endif
+
// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
index c0e708e1292..d5d5ce8950e 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -40,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call);
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
@@ -86,6 +80,9 @@ public:
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
+#ifdef COMPILER2
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
+#endif
void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool is_cae, Register result);
};
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 07a2d6fbfa0..4f0977a414f 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1326,6 +1326,23 @@ void ZStoreBarrierStubC2Aarch64::emit_code(MacroAssembler& masm) {
register_stub(this);
}
+#undef __
+#define __ masm->
+
+void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the oop is bad, in which case we need to take the slow path.
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
+ __ movzw(tmp, barrier_Relocation::unpatched);
+ __ tst(obj, tmp);
+ __ br(Assembler::NE, slow_path);
+
+ // Oop is okay, so we uncolor it.
+ __ lsr(obj, obj, ZPointerLoadShift);
+}
+
#undef __
#endif // COMPILER2
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
index 487970ab0c5..fbbc5c1b517 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -191,6 +191,7 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
+ void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index 8e520314c8b..a59e83c4b69 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
define_pd_global(size_t, CodeCacheSegmentSize, 64);
-define_pd_global(intx, CodeEntryAlignment, 64);
+define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 957c2aee1c1..2b506b241e0 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -240,15 +240,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
-// r2, r5
+// r2
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
- assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
- profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
+ profile_typecheck(r2, Rsub_klass); // blows r2
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@@ -991,7 +990,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
- Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1009,7 +1007,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
- record_klass_in_profile(receiver, mdp, reg2);
+ profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@@ -1018,131 +1016,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
-// This routine creates a state machine for updating the multi-row
-// type profile at a virtual call site (or other type-sensitive bytecode).
-// The machine visits each row (of receiver/count) until the receiver type
-// is found, or until it runs out of rows. At the same time, it remembers
-// the location of the first empty row. (An empty row records null for its
-// receiver, and can be allocated for a newly-observed receiver type.)
-// Because there are two degrees of freedom in the state, a simple linear
-// search will not work; it must be a decision tree. Hence this helper
-// function is recursive, to generate the required tree structured code.
-// It's the interpreter, so we are trading off code space for speed.
-// See below for example code.
-void InterpreterMacroAssembler::record_klass_in_profile_helper(
- Register receiver, Register mdp,
- Register reg2, int start_row,
- Label& done) {
- if (TypeProfileWidth == 0) {
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- } else {
- record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
- &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
- }
-}
-
-void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
- int last_row = total_rows - 1;
- assert(start_row <= last_row, "must be work left to do");
- // Test this row for both the item and for null.
- // Take any of three different outcomes:
- // 1. found item => increment count and goto done
- // 2. found null => keep looking for case 1, maybe allocate this cell
- // 3. found something else => keep looking for cases 1 and 2
- // Case 3 is handled by a recursive call.
- for (int row = start_row; row <= last_row; row++) {
- Label next_test;
- bool test_for_null_also = (row == start_row);
-
- // See if the item is item[n].
- int item_offset = in_bytes(item_offset_fn(row));
- test_mdp_data_at(mdp, item_offset, item,
- (test_for_null_also ? reg2 : noreg),
- next_test);
- // (Reg2 now contains the item from the CallData.)
-
- // The item is item[n]. Increment count[n].
- int count_offset = in_bytes(item_count_offset_fn(row));
- increment_mdp_data_at(mdp, count_offset);
- b(done);
- bind(next_test);
-
- if (test_for_null_also) {
- Label found_null;
- // Failed the equality check on item[n]... Test for null.
- if (start_row == last_row) {
- // The only thing left to do is handle the null case.
- cbz(reg2, found_null);
- // Item did not match any saved item and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- b(done);
- bind(found_null);
- break;
- }
- // Since null is rare, make it be the branch-taken case.
- cbz(reg2, found_null);
-
- // Put all the "Case 3" tests here.
- record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
- item_offset_fn, item_count_offset_fn);
-
- // Found a null. Keep searching for a matching item,
- // but remember that this is an empty (unused) slot.
- bind(found_null);
- }
- }
-
- // In the fall-through case, we found no matching item, but we
- // observed the item[start_row] is null.
-
- // Fill in the item field and increment the count.
- int item_offset = in_bytes(item_offset_fn(start_row));
- set_mdp_data_at(mdp, item_offset, item);
- int count_offset = in_bytes(item_count_offset_fn(start_row));
- mov(reg2, DataLayout::counter_increment);
- set_mdp_data_at(mdp, count_offset, reg2);
- if (start_row > 0) {
- b(done);
- }
-}
-
-// Example state machine code for three profile rows:
-// // main copy of decision tree, rooted at row[1]
-// if (row[0].rec == rec) { row[0].incr(); goto done; }
-// if (row[0].rec != nullptr) {
-// // inner copy of decision tree, rooted at row[1]
-// if (row[1].rec == rec) { row[1].incr(); goto done; }
-// if (row[1].rec != nullptr) {
-// // degenerate decision tree, rooted at row[2]
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
-// row[2].init(rec); goto done;
-// } else {
-// // remember row[1] is empty
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// row[1].init(rec); goto done;
-// }
-// } else {
-// // remember row[0] is empty
-// if (row[1].rec == rec) { row[1].incr(); goto done; }
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// row[0].init(rec); goto done;
-// }
-// done:
-
-void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
- Register mdp, Register reg2) {
- assert(ProfileInterpreter, "must be profiling");
- Label done;
-
- record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
-
- bind (done);
-}
-
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@@ -1200,7 +1073,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
-void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
+void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1213,7 +1086,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
- record_klass_in_profile(klass, mdp, reg2);
+ profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index 2b230a3b73e..74d4430000d 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -273,15 +273,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
- void record_klass_in_profile(Register receiver, Register mdp,
- Register reg2);
- void record_klass_in_profile_helper(Register receiver, Register mdp,
- Register reg2, int start_row,
- Label& done);
- void record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
-
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@@ -295,11 +286,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
- Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
- void profile_typecheck(Register mdp, Register klass, Register scratch);
+ void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index b8a9afc123f..3e3e95be07e 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -406,7 +406,6 @@ public:
offset <<= shift;
uint64_t target_page = ((uint64_t)insn_addr) + offset;
target_page &= ((uint64_t)-1) << shift;
- uint32_t insn2 = insn_at(insn_addr, 1);
target = address(target_page);
precond(inner != nullptr);
inner(insn_addr, target);
@@ -473,6 +472,7 @@ address MacroAssembler::target_addr_for_insn(address insn_addr) {
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
return RelocActions::run(insn_addr, target);
}
@@ -481,6 +481,8 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
unsigned insn = *(unsigned*)insn_addr;
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
+
// OOPs are either narrow (32 bits) or wide (48 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
@@ -510,18 +512,13 @@ int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
+
Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
return 2 * NativeInstruction::instruction_size;
}
-address MacroAssembler::target_addr_for_insn_or_null(address insn_addr) {
- if (NativeInstruction::is_ldrw_to_zr(insn_addr)) {
- return nullptr;
- }
- return MacroAssembler::target_addr_for_insn(insn_addr);
-}
-
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
if (at_return) {
@@ -1955,9 +1952,7 @@ void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
const Register
r_array_base = temp1,
- r_array_length = temp2,
- r_array_index = noreg, // unused
- r_bitmap = noreg; // unused
+ r_array_length = temp2;
BLOCK_COMMENT("verify_secondary_supers_table {");
@@ -2118,6 +2113,161 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
+// Handle the receiver type profile update given the "recv" klass.
+//
+// Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
+// If there are no matching or claimable receiver entries in RD, updates
+// the polymorphic counter.
+//
+// This code expected to run by either the interpreter or JIT-ed code, without
+// extra synchronization. For safety, receiver cells are claimed atomically, which
+// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
+// counter updates are not atomic.
+//
+void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
+ assert_different_registers(recv, mdp, rscratch1, rscratch2);
+
+ int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
+ int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
+ int poly_count_offset = in_bytes(CounterData::count_offset());
+ int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
+ int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
+
+ // Adjust for MDP offsets.
+ base_receiver_offset += mdp_offset;
+ end_receiver_offset += mdp_offset;
+ poly_count_offset += mdp_offset;
+
+#ifdef ASSERT
+ // We are about to walk the MDO slots without asking for offsets.
+ // Check that our math hits all the right spots.
+ for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
+ int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
+ int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
+ int offset = base_receiver_offset + receiver_step*c;
+ int count_offset = offset + receiver_to_count_step;
+ assert(offset == real_recv_offset, "receiver slot math");
+ assert(count_offset == real_count_offset, "receiver count math");
+ }
+ int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
+ assert(poly_count_offset == real_poly_count_offset, "poly counter math");
+#endif
+
+ // Corner case: no profile table. Increment poly counter and exit.
+ if (ReceiverTypeData::row_limit() == 0) {
+ increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
+ return;
+ }
+
+ Register offset = rscratch2;
+
+ Label L_loop_search_receiver, L_loop_search_empty;
+ Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
+
+ // The code here recognizes three major cases:
+ // A. Fastest: receiver found in the table
+ // B. Fast: no receiver in the table, and the table is full
+ // C. Slow: no receiver in the table, free slots in the table
+ //
+ // The case A performance is most important, as perfectly-behaved code would end up
+ // there, especially with larger TypeProfileWidth. The case B performance is
+ // important as well, this is where bulk of code would land for normally megamorphic
+ // cases. The case C performance is not essential, its job is to deal with installation
+ // races, we optimize for code density instead. Case C needs to make sure that receiver
+ // rows are only claimed once. This makes sure we never overwrite a row for another
+ // receiver and never duplicate the receivers in the list, making profile type-accurate.
+ //
+ // It is very tempting to handle these cases in a single loop, and claim the first slot
+ // without checking the rest of the table. But, profiling code should tolerate free slots
+ // in the table, as class unloading can clear them. After such cleanup, the receiver
+ // we need might be _after_ the free slot. Therefore, we need to let at least full scan
+ // to complete, before trying to install new slots. Splitting the code in several tight
+ // loops also helpfully optimizes for cases A and B.
+ //
+ // This code is effectively:
+ //
+ // restart:
+ // // Fastest: receiver is already installed
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == recv) goto found_recv(i);
+ // }
+ //
+ // // Fast: no receiver, but profile is full
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == null) goto found_null(i);
+ // }
+ // goto polymorphic
+ //
+ // // Slow: try to install receiver
+ // found_null(i):
+ // CAS(&receiver(i), null, recv);
+ // goto restart
+ //
+ // polymorphic:
+ // count++;
+ // return
+ //
+ // found_recv(i):
+ // *receiver_count(i)++
+ //
+
+ bind(L_restart);
+
+ // Fastest: receiver is already installed
+ mov(offset, base_receiver_offset);
+ bind(L_loop_search_receiver);
+ ldr(rscratch1, Address(mdp, offset));
+ cmp(rscratch1, recv);
+ br(Assembler::EQ, L_found_recv);
+ add(offset, offset, receiver_step);
+ sub(rscratch1, offset, end_receiver_offset);
+ cbnz(rscratch1, L_loop_search_receiver);
+
+ // Fast: no receiver, but profile is full
+ mov(offset, base_receiver_offset);
+ bind(L_loop_search_empty);
+ ldr(rscratch1, Address(mdp, offset));
+ cbz(rscratch1, L_found_empty);
+ add(offset, offset, receiver_step);
+ sub(rscratch1, offset, end_receiver_offset);
+ cbnz(rscratch1, L_loop_search_empty);
+ b(L_polymorphic);
+
+ // Slow: try to install receiver
+ bind(L_found_empty);
+
+ // Atomically swing receiver slot: null -> recv.
+ //
+ // The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
+ // is used to hold the destination address. This is safe because the
+ // offset is no longer needed after the address is computed.
+
+ lea(rscratch2, Address(mdp, offset));
+ cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
+ /*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
+
+ // CAS success means the slot now has the receiver we want. CAS failure means
+ // something had claimed the slot concurrently: it can be the same receiver we want,
+ // or something else. Since this is a slow path, we can optimize for code density,
+ // and just restart the search from the beginning.
+ b(L_restart);
+
+ // Counter updates:
+
+ // Increment polymorphic counter instead of receiver slot.
+ bind(L_polymorphic);
+ mov(offset, poly_count_offset);
+ b(L_count_update);
+
+ // Found a receiver, convert its slot offset to corresponding count offset.
+ bind(L_found_recv);
+ add(offset, offset, receiver_to_count_step);
+
+ bind(L_count_update);
+ increment(Address(mdp, offset), DataLayout::counter_increment);
+}
+
+
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
@@ -3451,9 +3601,8 @@ extern "C" void findpc(intptr_t x);
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
// In order to get locks to work, we need to fake a in_VM state
- if (ShowMessageBoxOnError ) {
+ if (ShowMessageBoxOnError) {
JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
@@ -5578,7 +5727,6 @@ address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype
}
void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
- relocInfo::relocType rtype = dest.rspec().reloc()->type();
uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
uint64_t dest_page = (uint64_t)dest.target() >> 12;
@@ -5606,12 +5754,33 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
- CardTable::CardValue* byte_map_base =
- ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
+#if INCLUDE_CDS
+ if (AOTCodeCache::is_on_for_dump()) {
+ address byte_map_base_adr = AOTRuntimeConstants::card_table_base_address();
+ lea(reg, ExternalAddress(byte_map_base_adr));
+ ldr(reg, Address(reg));
+ return;
+ }
+#endif
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
- // Strictly speaking the byte_map_base isn't an address at all, and it might
+ // Strictly speaking the card table base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
- mov(reg, (uint64_t)byte_map_base);
+ mov(reg, (uint64_t)ctbs->card_table_base_const());
+}
+
+void MacroAssembler::load_aotrc_address(Register reg, address a) {
+#if INCLUDE_CDS
+ assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
+ if (AOTCodeCache::is_on_for_dump()) {
+ // all aotrc field addresses should be registered in the AOTCodeCache address table
+ lea(reg, ExternalAddress(a));
+ } else {
+ mov(reg, (uint64_t)a);
+ }
+#else
+ ShouldNotReachHere();
+#endif
}
void MacroAssembler::build_frame(int framesize) {
@@ -5782,6 +5951,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -5848,6 +6020,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
@@ -5949,7 +6124,6 @@ void MacroAssembler::string_equals(Register a1, Register a2,
Label SAME, DONE, SHORT, NEXT_WORD;
Register tmp1 = rscratch1;
Register tmp2 = rscratch2;
- Register cnt2 = tmp2; // cnt2 only used in array length compare
assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
@@ -6259,10 +6433,14 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
// Intrinsic for
//
-// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
-// return the number of characters copied.
-// - java/lang/StringUTF16.compress
-// return index of non-latin1 character if copy fails, otherwise 'len'.
+// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
+// Encodes char[] to byte[] in ISO-8859-1
+//
+// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
+// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
+//
+// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
+// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 4baa07d7d49..fa32f3055b9 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -678,7 +678,6 @@ public:
static bool uses_implicit_null_check(void* address);
static address target_addr_for_insn(address insn_addr);
- static address target_addr_for_insn_or_null(address insn_addr);
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
@@ -1122,6 +1121,8 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+ void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
+
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {
@@ -1475,6 +1476,9 @@ public:
// Load the base of the cardtable byte map into reg.
void load_byte_map_base(Register reg);
+ // Load a constant address in the AOT Runtime Constants area
+ void load_aotrc_address(Register reg, address a);
+
// Prolog generator routines to support switch between x86 code and
// generated ARM code
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index f2003dd9b55..8b76b96d345 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -133,7 +133,6 @@ void NativeMovConstReg::verify() {
intptr_t NativeMovConstReg::data() const {
- // das(uint64_t(instruction_address()),2);
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
if (maybe_cpool_ref(instruction_address())) {
return *(intptr_t*)addr;
@@ -144,6 +143,7 @@ intptr_t NativeMovConstReg::data() const {
void NativeMovConstReg::set_data(intptr_t x) {
if (maybe_cpool_ref(instruction_address())) {
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
*(intptr_t*)addr = x;
} else {
@@ -192,7 +192,6 @@ int NativeMovRegMem::offset() const {
void NativeMovRegMem::set_offset(int x) {
address pc = instruction_address();
- unsigned insn = *(unsigned*)pc;
if (maybe_cpool_ref(pc)) {
address addr = MacroAssembler::target_addr_for_insn(pc);
*(int64_t*)addr = x;
@@ -204,7 +203,7 @@ void NativeMovRegMem::set_offset(int x) {
void NativeMovRegMem::verify() {
#ifdef ASSERT
- address dest = MacroAssembler::target_addr_for_insn_or_null(instruction_address());
+ MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}
@@ -213,7 +212,7 @@ void NativeMovRegMem::verify() {
void NativeJump::verify() { ; }
address NativeJump::jump_destination() const {
- address dest = MacroAssembler::target_addr_for_insn_or_null(instruction_address());
+ address dest = MacroAssembler::target_addr_for_insn(instruction_address());
// We use jump to self as the unresolved address which the inline
// cache code (and relocs) know about
@@ -350,8 +349,6 @@ bool NativeInstruction::is_stop() {
//-------------------------------------------------------------------
-void NativeGeneralJump::verify() { }
-
// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
ShouldNotCallThis();
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index c30cb911d96..fc7274714ad 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -78,7 +78,6 @@ public:
inline bool is_nop() const;
bool is_jump();
bool is_general_jump();
- inline bool is_jump_or_nop();
inline bool is_cond_jump();
bool is_safepoint_poll();
bool is_movz();
@@ -90,16 +89,18 @@ protected:
s_char sbyte_at(int offset) const { return *(s_char*)addr_at(offset); }
u_char ubyte_at(int offset) const { return *(u_char*)addr_at(offset); }
- jint int_at(int offset) const { return *(jint*)addr_at(offset); }
- juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
- address ptr_at(int offset) const { return *(address*)addr_at(offset); }
- oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
+ jint int_at(int offset) const { return *(jint*)addr_at(offset); }
+ juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
+ address ptr_at(int offset) const { return *(address*)addr_at(offset); }
+ oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
- void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
- void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
- void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
- void set_ptr_at(int offset, address ptr) { *(address*)addr_at(offset) = ptr; }
- void set_oop_at(int offset, oop o) { *(oop*)addr_at(offset) = o; }
+#define MACOS_WX_WRITE MACOS_AARCH64_ONLY(os::thread_wx_enable_write())
+ void set_char_at(int offset, char c) { MACOS_WX_WRITE; *addr_at(offset) = (u_char)c; }
+ void set_int_at(int offset, jint i) { MACOS_WX_WRITE; *(jint*)addr_at(offset) = i; }
+ void set_uint_at(int offset, jint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; }
+ void set_ptr_at(int offset, address ptr) { MACOS_WX_WRITE; *(address*)addr_at(offset) = ptr; }
+ void set_oop_at(int offset, oop o) { MACOS_WX_WRITE; *(oop*)addr_at(offset) = o; }
+#undef MACOS_WX_WRITE
void wrote(int offset);
@@ -380,7 +381,6 @@ public:
void set_jump_destination(address dest);
static void replace_mt_safe(address instr_addr, address code_buffer);
- static void verify();
};
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
@@ -419,10 +419,6 @@ inline bool NativeInstruction::is_jump() {
return false;
}
-inline bool NativeInstruction::is_jump_or_nop() {
- return is_nop() || is_jump();
-}
-
// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
public:
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 89ae6bc10e0..73b631029a0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
- __ andsw(zr, rscratch1, JVM_ACC_STATIC);
- __ br(Assembler::EQ, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
+ __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+ __ br(Assembler::EQ, L_skip_barrier); // non-static
- __ load_method_holder(rscratch2, rmethod);
- __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(rscratch2, rmethod);
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
index 7e2f333ba40..a459a28b09e 100644
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
- // (parsedLength or (parsedLength - 48) must be divisible by 64.)
+ // we assume that parsed and condensed are allocated such that for
+ // n = (parsedLength + 63) / 64
+ // n blocks of 96 bytes of input can be processed, i.e.
+ // index + n * 96 <= condensed.length and
+ // n * 64 <= parsed.length
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
- // parsed (short[112 or 256]) = c_rarg2
- // parsedLength (112 or 256) = c_rarg3
+ // parsed (short[]) = c_rarg2
+ // parsedLength = c_rarg3
address generate_kyber12To16() {
- Label L_F00, L_loop, L_end;
+ Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
- __ cmp(parsedLength, (u1)64);
- __ br(Assembler::GE, L_loop);
- __ cbz(parsedLength, L_end);
-
- // if anything is left it should be a final 72 bytes of input
- // i.e. a final 48 12-bit values. so we handle this by loading
- // 48 bytes into all 16B lanes of front(vin) and only 24
- // bytes into the lower 8B lane of back(vin)
- vs_ld3_post(vs_front(vin), __ T16B, condensed);
- vs_ld3(vs_back(vin), __ T8B, condensed);
-
- // Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
- // n.b. target elements 2 and 3 of va duplicate elements 4 and
- // 5 and target element 2 of vb duplicates element 4.
- __ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
- __ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
- __ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
- __ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
- __ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
- __ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
-
- // This time expand just the lower 8 lanes
- __ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
- __ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
- __ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
-
- // shift lo byte of copy 1 of the middle stripe into the high byte
- __ shl(va[2], __ T8H, va[2], 8);
- __ shl(va[3], __ T8H, va[3], 8);
- __ shl(vb[2], __ T8H, vb[2], 8);
-
- // expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
- // vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
- // int are in bit positions [4..11].
- __ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
- __ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
- __ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
-
- // mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
- // shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
- // copy2
- __ andr(va[2], __ T16B, va[2], v31);
- __ andr(va[3], __ T16B, va[3], v31);
- __ ushr(va[4], __ T8H, va[4], 4);
- __ ushr(va[5], __ T8H, va[5], 4);
- __ andr(vb[2], __ T16B, vb[2], v31);
- __ ushr(vb[4], __ T8H, vb[4], 4);
-
-
-
- // sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
- // hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
-
- // n.b. ordering ensures: i) inputs are consumed before they are
- // overwritten ii) order of 16-bit results across succsessive
- // pairs of vectors in va and then lower half of vb reflects order
- // of corresponding 12-bit inputs
- __ addv(va[0], __ T8H, va[0], va[2]);
- __ addv(va[2], __ T8H, va[1], va[3]);
- __ addv(va[1], __ T8H, va[4], va[6]);
- __ addv(va[3], __ T8H, va[5], va[7]);
- __ addv(vb[0], __ T8H, vb[0], vb[2]);
- __ addv(vb[1], __ T8H, vb[4], vb[6]);
-
- // store 48 results interleaved as shorts
- vs_st2_post(vs_front(va), __ T8H, parsed);
- vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
-
- __ BIND(L_end);
+ __ cmp(parsedLength, (u1)0);
+ __ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0
@@ -11805,7 +11742,9 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
- StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
+ if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_setMemory)) {
+ StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
+ }
StubRoutines::aarch64::set_completed(); // Inidicate that arraycopy and zero_blocks stubs are generated
}
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index cde142b39ac..69769fb8441 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -96,10 +96,6 @@ static inline Address aaddress(Register r) {
return iaddress(r);
}
-static inline Address at_rsp() {
- return Address(esp, 0);
-}
-
// At top of Java expression stack which may be different than esp(). It
// isn't for category 1 objects.
static inline Address at_tos () {
@@ -2290,7 +2286,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2340,8 +2337,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
@@ -3369,7 +3366,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
- __ profile_virtual_call(r0, rlocals, r3);
+ __ profile_virtual_call(r0, rlocals);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@@ -3499,7 +3496,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
- __ profile_virtual_call(r3, r13, r19);
+ __ profile_virtual_call(r3, r13);
// Get declaring interface class from method, and itable index
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 659c231464a..9b85733ed08 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -201,16 +201,14 @@ void VM_Version::initialize() {
}
}
- // Cortex A53
- if (_cpu == CPU_ARM && model_is(0xd03)) {
+ if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A53)) {
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
}
- // Cortex A73
- if (_cpu == CPU_ARM && model_is(0xd09)) {
+ if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A73)) {
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
}
@@ -220,16 +218,11 @@ void VM_Version::initialize() {
}
}
- // Neoverse
- // N1: 0xd0c
- // N2: 0xd49
- // N3: 0xd8e
- // V1: 0xd40
- // V2: 0xd4f
- // V3: 0xd84
- if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
- model_is(0xd40) || model_is(0xd4f) ||
- model_is(0xd8e) || model_is(0xd84))) {
+ if (_cpu == CPU_ARM &&
+ model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1,
+ CPU_MODEL_ARM_NEOVERSE_N2, CPU_MODEL_ARM_NEOVERSE_V2,
+ CPU_MODEL_ARM_NEOVERSE_N3, CPU_MODEL_ARM_NEOVERSE_V3,
+ CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@@ -261,12 +254,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32, false);
}
- // Neoverse
- // V1: 0xd40
- // V2: 0xd4f
- // V3: 0xd84
if (_cpu == CPU_ARM &&
- (model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
+ model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
+ CPU_MODEL_ARM_NEOVERSE_V3, CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
@@ -456,7 +446,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
}
} else if (UseBlockZeroing) {
- warning("DC ZVA is not available on this CPU");
+ if (!FLAG_IS_DEFAULT(UseBlockZeroing)) {
+ warning("DC ZVA is not available on this CPU");
+ }
FLAG_SET_DEFAULT(UseBlockZeroing, false);
}
@@ -632,6 +624,22 @@ void VM_Version::initialize() {
check_virtualizations();
+#ifdef __APPLE__
+ DefaultWXWriteMode = UseOldWX ? WXWrite : WXArmedForWrite;
+
+ if (TraceWXHealing) {
+ if (pthread_jit_write_protect_supported_np()) {
+ tty->print_cr("### TraceWXHealing is in use");
+ if (StressWXHealing) {
+ tty->print_cr("### StressWXHealing is in use");
+ }
+ } else {
+ tty->print_cr("WX Healing is not in use because MAP_JIT write protection "
+ "does not work on this system.");
+ }
+ }
+#endif
+
// Sync SVE related CPU features with flags
if (UseSVE < 2) {
clear_feature(CPU_SVE2);
@@ -658,16 +666,52 @@ void VM_Version::initialize() {
void VM_Version::insert_features_names(uint64_t features, stringStream& ss) {
int i = 0;
ss.join([&]() {
- while (i < MAX_CPU_FEATURES) {
- if (supports_feature((VM_Version::Feature_Flag)i)) {
- return _features_names[i++];
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ if (supports_feature(features, (VM_Version::Feature_Flag)i)) {
+ str = _features_names[i];
}
i += 1;
}
- return (const char*)nullptr;
+ return str;
}, ", ");
}
+void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
+ uint64_t features = *(uint64_t*)features_buffer;
+ insert_features_names(features, ss);
+}
+
+void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
+ uint64_t vm_features_set1 = *(uint64_t*)features_set1;
+ uint64_t vm_features_set2 = *(uint64_t*)features_set2;
+ int i = 0;
+ ss.join([&]() {
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ Feature_Flag flag = (Feature_Flag)i;
+ if (supports_feature(vm_features_set1, flag) && !supports_feature(vm_features_set2, flag)) {
+ str = _features_names[i];
+ }
+ i += 1;
+ }
+ return str;
+ }, ", ");
+}
+
+int VM_Version::cpu_features_size() {
+ return sizeof(_features);
+}
+
+void VM_Version::store_cpu_features(void* buf) {
+ *(uint64_t*)buf = _features;
+}
+
+bool VM_Version::supports_features(void* features_buffer) {
+ uint64_t features_to_test = *(uint64_t*)features_buffer;
+ return (_features & features_to_test) == features_to_test;
+}
+
#if defined(LINUX)
static bool check_info_file(const char* fpath,
const char* virt1, VirtualizationType vt1,
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 17087d243d3..0213872852b 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,6 +30,8 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
+#include <initializer_list>
+
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
@@ -112,14 +114,26 @@ public:
CPU_APPLE = 'a',
};
-enum Ampere_CPU_Model {
+ enum Ampere_CPU_Model {
CPU_MODEL_EMAG = 0x0, /* CPU implementer is CPU_AMCC */
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
-};
+ };
+
+ enum ARM_CPU_Model {
+ CPU_MODEL_ARM_CORTEX_A53 = 0xd03,
+ CPU_MODEL_ARM_CORTEX_A73 = 0xd09,
+ CPU_MODEL_ARM_NEOVERSE_N1 = 0xd0c,
+ CPU_MODEL_ARM_NEOVERSE_V1 = 0xd40,
+ CPU_MODEL_ARM_NEOVERSE_N2 = 0xd49,
+ CPU_MODEL_ARM_NEOVERSE_V2 = 0xd4f,
+ CPU_MODEL_ARM_NEOVERSE_V3AE = 0xd83,
+ CPU_MODEL_ARM_NEOVERSE_V3 = 0xd84,
+ CPU_MODEL_ARM_NEOVERSE_N3 = 0xd8e,
+ };
#define CPU_FEATURE_FLAGS(decl) \
decl(FP, fp, 0) \
@@ -170,6 +184,9 @@ enum Ampere_CPU_Model {
static bool supports_feature(Feature_Flag flag) {
return (_features & BIT_MASK(flag)) != 0;
}
+ static bool supports_feature(uint64_t features, Feature_Flag flag) {
+ return (features & BIT_MASK(flag)) != 0;
+ }
static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; }
@@ -181,7 +198,16 @@ enum Ampere_CPU_Model {
return _model == cpu_model || _model2 == cpu_model;
}
- static bool is_zva_enabled() { return 0 <= _zva_length; }
+ static bool model_is_in(std::initializer_list<int> cpu_models) {
+ for (const int& cpu_model : cpu_models) {
+ if (_model == cpu_model || _model2 == cpu_model) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool is_zva_enabled() { return 0 < _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");
return _zva_length;
@@ -221,6 +247,20 @@ enum Ampere_CPU_Model {
static bool use_neon_for_vector(int vector_length_in_bytes) {
return vector_length_in_bytes <= 16;
}
+
+ static void get_cpu_features_name(void* features_buffer, stringStream& ss);
+
+ // Returns names of features present in features_set1 but not in features_set2
+ static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
+
+ // Returns number of bytes required to store cpu features representation
+ static int cpu_features_size();
+
+ // Stores cpu features representation in the provided buffer. This representation is arch dependent.
+ // Size of the buffer must be same as returned by cpu_features_size()
+ static void store_cpu_features(void* buf);
+
+ static bool supports_features(void* features_to_test);
};
#endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index 7a23296a3d4..f791fae7bd7 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
- if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+ if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
- if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+ if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
diff --git a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
index 2427d46cafa..5d63035ac69 100644
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
@@ -67,9 +67,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
- BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_cardtable_loop, L_done;
@@ -83,7 +81,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
- __ mov_address(tmp, (address) ct->byte_map_base());
+ __ mov_address(tmp, (address)ctbs->card_table_base_const());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@@ -122,8 +120,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableBarrierSet* ctbs = barrier_set_cast(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Load card table base address.
@@ -140,7 +137,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
- __ mov_address(card_table_base, (address)ct->byte_map_base());
+ __ mov_address(card_table_base, (address)ctbs->card_table_base_const());
}
// The 2nd part of the store check.
@@ -170,8 +167,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
- if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
+
+ if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
diff --git a/src/hotspot/cpu/arm/globals_arm.hpp b/src/hotspot/cpu/arm/globals_arm.hpp
index 363a9a2c25c..c568ea04122 100644
--- a/src/hotspot/cpu/arm/globals_arm.hpp
+++ b/src/hotspot/cpu/arm/globals_arm.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@ define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
define_pd_global(bool, DelayCompilerStubsGeneration, false); // No need - only few compiler's stubs
define_pd_global(size_t, CodeCacheSegmentSize, 64);
-define_pd_global(intx, CodeEntryAlignment, 16);
+define_pd_global(uint, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 232294b246a..df780ac31a6 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
- if(pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
- if (pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 82385bf0244..2b52db89285 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
- void set_data(intptr_t x, address pc = 0);
+ void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp
index 15e38411482..da2daffd579 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
+ XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
+ XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
+ XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@@ -596,6 +599,9 @@ class Assembler : public AbstractAssembler {
XVMAXSP_OPCODE = (60u << OPCODE_SHIFT | 192u << 3),
XVMAXDP_OPCODE = (60u << OPCODE_SHIFT | 224u << 3),
+ XSMINJDP_OPCODE = (60u << OPCODE_SHIFT | 152u << 3),
+ XSMAXJDP_OPCODE = (60u << OPCODE_SHIFT | 144u << 3),
+
// Deliver A Random Number (introduced with POWER9)
DARN_OPCODE = (31u << OPCODE_SHIFT | 755u << 1),
@@ -2424,6 +2430,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
+ inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
+ inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
+ inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
@@ -2449,6 +2458,9 @@ class Assembler : public AbstractAssembler {
inline void xvrdpim( VectorSRegister d, VectorSRegister b);
inline void xvrdpip( VectorSRegister d, VectorSRegister b);
+ inline void xsminjdp( VectorSRegister d, VectorSRegister a, VectorSRegister b); // Requires Power9
+ inline void xsmaxjdp( VectorSRegister d, VectorSRegister a, VectorSRegister b); // Requires Power9
+
// The following functions do not match exactly the Java.math semantics.
inline void xvminsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvmindp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index 7e49ec7455d..bd6f3300606 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -908,6 +908,9 @@ inline void Assembler::xvrdpic( VectorSRegister d, VectorSRegister b)
inline void Assembler::xvrdpim( VectorSRegister d, VectorSRegister b) { emit_int32( XVRDPIM_OPCODE | vsrt(d) | vsrb(b)); }
inline void Assembler::xvrdpip( VectorSRegister d, VectorSRegister b) { emit_int32( XVRDPIP_OPCODE | vsrt(d) | vsrb(b)); }
+inline void Assembler::xsminjdp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XSMINJDP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xsmaxjdp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XSMAXJDP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+
inline void Assembler::xvminsp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVMINSP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xvmindp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVMINDP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xvmaxsp(VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XVMAXSP_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
@@ -923,6 +926,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
+inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
index edf348fdc50..8bbffc22c54 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -600,19 +601,21 @@ void C2_MacroAssembler::count_positives(Register src, Register cnt, Register res
orr(tmp0, tmp2, tmp0);
and_(tmp0, tmp0, tmp1);
- bne(CR0, Lslow); // Found negative byte.
+ bne(CR0, Lslow); // Found negative byte.
addi(result, result, 16);
bdnz(Lfastloop);
bind(Lslow); // Fallback to slow version.
subf(tmp0, src, result); // Bytes known positive.
- subf_(tmp0, tmp0, cnt); // Remaining Bytes.
+ clrldi(tmp1, cnt, 32); // Clear garbage from upper 32 bits.
+ subf_(tmp0, tmp0, tmp1); // Remaining Bytes.
beq(CR0, Ldone);
mtctr(tmp0);
+
bind(Lloop);
lbz(tmp0, 0, result);
andi_(tmp0, tmp0, 0x80);
- bne(CR0, Ldone); // Found negative byte.
+ bne(CR0, Ldone); // Found negative byte.
addi(result, result, 1);
bdnz(Lloop);
@@ -664,3 +667,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
+// Works for single and double precision floats.
+// dst = (op1 cmp(cc) op2) ? src1 : src2;
+// Unordered semantics are the same as for CmpF3Node/CmpD3Node which implement the fcmpl/dcmpl bytecodes.
+// Comparing unordered values has the same result as when src1 is less than src2.
+// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
+void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
+ VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
+ // See operand cmpOp() for details.
+ bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
+ auto cmp = (Assembler::Condition)(cc & 3);
+
+ switch(cmp) {
+ case Assembler::Condition::equal:
+ // Use false_result if "unordered".
+ xscmpeqdp(tmp, op1, op2);
+ break;
+ case Assembler::Condition::greater:
+ // Use false_result if "unordered".
+ xscmpgtdp(tmp, op1, op2);
+ break;
+ case Assembler::Condition::less:
+ // Use true_result if "unordered".
+ xscmpgedp(tmp, op1, op2);
+ invert_cond = !invert_cond;
+ break;
+ default:
+ assert(false, "unsupported compare condition: %d", cc);
+ ShouldNotReachHere();
+ }
+
+ VectorSRegister true_result = invert_cond ? src2 : src1;
+ VectorSRegister false_result = invert_cond ? src1 : src2;
+ xxsel(dst, false_result, true_result, tmp);
+}
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
index 5a114294c1f..e0dffec8396 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
@@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
+ void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
+ VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP
diff --git a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
index d5a0ff10994..caef322d4a1 100644
--- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@ define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
-define_pd_global(intx, FreqInlineSize, 175);
+define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
index 8712c75711d..82d06f6c685 100644
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -275,6 +275,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
+void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Load the oop from the weak handle.
+ __ ld(obj, 0, obj);
+}
+
#undef __
#define __ _masm->
diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp
index 2bf26bd5010..d78071f2ee0 100644
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -81,6 +81,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
+ Register tmp, Label& slow_path);
#endif // COMPILER2
};
diff --git a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
index 7404f7e2e5c..297ce57a394 100644
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
@@ -103,8 +103,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@@ -117,7 +116,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
- __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
+ __ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@@ -140,8 +139,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
- CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
+ CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
+ card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
index 1f1bc7622ed..e1f0416d65d 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -50,14 +51,14 @@
#define __ masm->
-void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level) {
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp1, Register tmp2, Register tmp3,
+ MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
- __ block_comment("satb_write_barrier (shenandoahgc) {");
- satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
- __ block_comment("} satb_write_barrier (shenandoahgc)");
+ __ block_comment("satb_barrier (shenandoahgc) {");
+ satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
+ __ block_comment("} satb_barrier (shenandoahgc)");
}
}
@@ -198,11 +199,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
-void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
- Register base, RegisterOrConstant ind_or_offs,
- Register pre_val,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level) {
+void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register pre_val,
+ Register tmp1, Register tmp2,
+ MacroAssembler::PreservationLevel preservation_level) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@@ -574,13 +576,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
- satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
+ satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@@ -603,21 +605,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
- if (is_reference_type(type)) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
- }
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
+ BarrierSetAssembler::store_at(masm, decorators, type,
+ base, ind_or_offs,
+ val,
+ tmp1, tmp2, tmp3,
+ preservation_level);
+ return;
}
+ bool storing_non_null = (val != noreg);
+
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
- // No need for post barrier if storing null
- if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
- store_check(masm, base, ind_or_offs, tmp1);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, base, ind_or_offs, tmp1);
}
}
@@ -649,6 +663,33 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
}
+#ifdef COMPILER2
+void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj,
+ Register tmp, Label &slow_path) {
+ __ block_comment("try_resolve_weak_handle_in_c2 (shenandoahgc) {");
+
+ assert_different_registers(obj, tmp);
+
+ Label done;
+
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the reference is null, and if it is, take the fast path.
+ __ cmpdi(CR0, obj, 0);
+ __ beq(CR0, done);
+
+ // Check if the heap is under weak-reference/roots processing, in
+ // which case we need to take the slow path.
+ __ lbz(tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
+ __ andi_(tmp, tmp, ShenandoahHeap::WEAK_ROOTS);
+ __ bne(CR0, slow_path);
+ __ bind(done);
+
+ __ block_comment("} try_resolve_weak_handle_in_c2 (shenandoahgc)");
+}
+#endif
+
// Special shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. That is, the CAS operation is intended to succeed in
// the following scenarios (success criteria):
@@ -771,9 +812,6 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
-
- ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
- CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
index b058dcf1a2e..672f8122bcb 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -45,15 +46,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
- void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
- Register base, RegisterOrConstant ind_or_offs,
- Register pre_val,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level);
+ void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register pre_val,
+ Register tmp1, Register tmp2,
+ MacroAssembler::PreservationLevel preservation_level);
- void store_check(MacroAssembler* masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp);
+ void card_barrier(MacroAssembler* masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@@ -85,10 +86,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
- void satb_write_barrier(MacroAssembler* masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level);
+ void satb_barrier(MacroAssembler* masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp1, Register tmp2, Register tmp3,
+ MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@@ -121,6 +122,9 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
+#ifdef COMPILER2
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
+#endif
};
#endif // CPU_PPC_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_PPC_HPP
diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
index 0aa5858c8e6..bfa3c87c179 100644
--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -950,6 +950,19 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ b(*stub->continuation());
}
+void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the oop is bad, in which case we need to take the slow path.
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadMask);
+ __ andi_(R0, obj, barrier_Relocation::unpatched);
+ __ bne(CR0, slow_path);
+
+ // Oop is okay, so we uncolor it.
+ __ srdi(obj, obj, ZPointerLoadShift);
+}
+
#undef __
#endif // COMPILER2
diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp
index 27203e7b01c..e31817370d9 100644
--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -108,6 +108,8 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const;
+
+ void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
void store_barrier_fast(MacroAssembler* masm,
diff --git a/src/hotspot/cpu/ppc/globals_ppc.hpp b/src/hotspot/cpu/ppc/globals_ppc.hpp
index 41a8e821ada..927a8cc2be3 100644
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -60,7 +60,7 @@ define_pd_global(bool, VMContinuations, true);
// Use large code-entry alignment.
define_pd_global(size_t, CodeCacheSegmentSize, 128);
-define_pd_global(intx, CodeEntryAlignment, 64);
+define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1500);
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index fc865be015e..f7bf457f72c 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
- add(R11_scratch1, R12_scratch2, R12_scratch2);
+ add(R11_scratch1, R11_scratch1, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
- call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 5649ead2ea8..986dd335816 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -42,6 +42,7 @@
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
+#include "runtime/objectMonitorTable.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
@@ -2756,39 +2757,54 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
addi(owner_addr, mark, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
mark = noreg;
} else {
+ const Register tmp3_bucket = tmp3;
+ const Register tmp2_hash = tmp2;
Label monitor_found;
- Register cache_addr = tmp2;
- // Load cache address
- addi(cache_addr, R16_thread, in_bytes(JavaThread::om_cache_oops_offset()));
+ // Save the mark, we might need it to extract the hash.
+ mr(tmp2_hash, mark);
- const int num_unrolled = 2;
+ // Look for the monitor in the om_cache.
+
+ ByteSize cache_offset = JavaThread::om_cache_oops_offset();
+ ByteSize monitor_offset = OMCache::oop_to_monitor_difference();
+ const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
- ld(R0, 0, cache_addr);
+ ld(R0, in_bytes(cache_offset), R16_thread);
+ ld(monitor, in_bytes(cache_offset + monitor_offset), R16_thread);
cmpd(CR0, R0, obj);
beq(CR0, monitor_found);
- addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
+ cache_offset = cache_offset + OMCache::oop_to_oop_difference();
}
- Label loop;
+ // Look for the monitor in the table.
- // Search for obj in cache.
- bind(loop);
+ // Get the hash code.
+ srdi(tmp2_hash, tmp2_hash, markWord::hash_shift);
- // Check for match.
- ld(R0, 0, cache_addr);
- cmpd(CR0, R0, obj);
- beq(CR0, monitor_found);
+      // Get the table and calculate the bucket's address.
+ int simm16_rest = load_const_optimized(tmp3, ObjectMonitorTable::current_table_address(), R0, true);
+ ld_ptr(tmp3, simm16_rest, tmp3);
+ ld(tmp1, in_bytes(ObjectMonitorTable::table_capacity_mask_offset()), tmp3);
+ andr(tmp2_hash, tmp2_hash, tmp1);
+ ld(tmp3_bucket, in_bytes(ObjectMonitorTable::table_buckets_offset()), tmp3);
- // Search until null encountered, guaranteed _null_sentinel at end.
- addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
- cmpdi(CR1, R0, 0);
- bne(CR1, loop);
- // Cache Miss, CR0.NE set from cmp above
- b(slow_path);
+ // Read the monitor from the bucket.
+ sldi(tmp2_hash, tmp2_hash, LogBytesPerWord);
+ ldx(monitor, tmp3_bucket, tmp2_hash);
+
+ // Check if the monitor in the bucket is special (empty, tombstone or removed).
+ cmpldi(CR0, monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
+ blt(CR0, slow_path);
+
+ // Check if object matches.
+ ld(tmp3, in_bytes(ObjectMonitor::object_offset()), monitor);
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path);
+ cmpd(CR0, tmp3, obj);
+ bne(CR0, slow_path);
bind(monitor_found);
- ld(monitor, in_bytes(OMCache::oop_to_monitor_difference()), cache_addr);
// Compute owner address.
addi(owner_addr, monitor, in_bytes(ObjectMonitor::owner_offset()));
@@ -4535,7 +4551,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@@ -4546,7 +4562,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ blt(CR0, done); // if (SP < _cont_fastpath) goto done;
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index 875602cae58..58dec702d7a 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/cpu/ppc/matcher_ppc.hpp b/src/hotspot/cpu/ppc/matcher_ppc.hpp
index aad41fb7b1c..2ddbec3e48c 100644
--- a/src/hotspot/cpu/ppc/matcher_ppc.hpp
+++ b/src/hotspot/cpu/ppc/matcher_ppc.hpp
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,12 +65,10 @@
return true;
}
- // Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
- // Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
- // fsel doesn't accept a condition register as input, so this would be slightly different.
- static int float_cmove_cost() { return ConditionalMoveLimit; }
+ // Suppress CMOVF for Power8 because there are no fast nodes.
+ static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched
diff --git a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
index 803bb6bfe69..45537e0ea96 100644
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
+++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
-// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
-inline static RegisterOrConstant constant(int value) {
- return RegisterOrConstant(value);
-}
-
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg,
Register temp_reg, Register temp2_reg) {
if (VerifyMethodHandles) {
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 2a0a9149bb3..4cb9f8820a0 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -1,6 +1,6 @@
//
-// Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+// Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2012, 2026 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -2234,6 +2234,12 @@ bool Matcher::match_rule_supported(int opcode) {
case Op_FmaVD:
return (SuperwordUseVSX && UseFMA);
+ case Op_MinF:
+ case Op_MaxF:
+ case Op_MinD:
+ case Op_MaxD:
+ return (PowerArchitecturePPC64 >= 9);
+
case Op_Digit:
return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isDigit);
case Op_LowerCase:
@@ -3024,7 +3030,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
- // use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@@ -3099,7 +3104,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
- // use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@@ -6618,7 +6622,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
-// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@@ -7293,7 +7296,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -7313,7 +7315,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -7326,6 +7327,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
+instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
+ match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
+ match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
+ match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
+ match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@@ -8492,7 +8557,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -8551,7 +8615,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -10262,7 +10325,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10276,7 +10338,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10439,7 +10500,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10453,7 +10513,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -11080,7 +11139,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;
@@ -12261,6 +12319,58 @@ instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegC
ins_pipe(pipe_class_default);
%}
+instruct minF(regF dst, regF src1, regF src2) %{
+ match(Set dst (MinF src1 src2));
+ predicate(PowerArchitecturePPC64 >= 9);
+ ins_cost(DEFAULT_COST);
+
+ format %{ "MinF $dst, $src1, $src2" %}
+ size(4);
+ ins_encode %{
+ __ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct minD(regD dst, regD src1, regD src2) %{
+ match(Set dst (MinD src1 src2));
+ predicate(PowerArchitecturePPC64 >= 9);
+ ins_cost(DEFAULT_COST);
+
+ format %{ "MinD $dst, $src1, $src2" %}
+ size(4);
+ ins_encode %{
+ __ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct maxF(regF dst, regF src1, regF src2) %{
+ match(Set dst (MaxF src1 src2));
+ predicate(PowerArchitecturePPC64 >= 9);
+ ins_cost(DEFAULT_COST);
+
+ format %{ "MaxF $dst, $src1, $src2" %}
+ size(4);
+ ins_encode %{
+ __ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct maxD(regD dst, regD src1, regD src2) %{
+ match(Set dst (MaxD src1 src2));
+ predicate(PowerArchitecturePPC64 >= 9);
+ ins_cost(DEFAULT_COST);
+
+ format %{ "MaxD $dst, $src1, $src2" %}
+ size(4);
+ ins_encode %{
+ __ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
//---------- Population Count Instructions ------------------------------------
instruct popCountI(iRegIdst dst, iRegIsrc src) %{
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 4e427ace404..5260ed978ff 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -775,7 +775,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
return stk;
}
-#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
@@ -913,7 +912,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
-#endif // COMPILER2
int SharedRuntime::vector_calling_convention(VMRegPair *regs,
uint num_bits,
@@ -1237,26 +1235,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
- __ andi_(R0, R0, JVM_ACC_STATIC);
- __ beq(CR0, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+ __ andi_(R0, R0, JVM_ACC_STATIC);
+ __ beq(CR0, L_skip_barrier); // non-static
- Register klass = R11_scratch1;
- __ load_method_holder(klass, R19_method);
- __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = R11_scratch1;
+ __ load_method_holder(klass, R19_method);
+ __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
- __ mtctr(klass);
- __ bctr();
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
+ __ mtctr(klass);
+ __ bctr();
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2206,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)
@@ -2875,7 +2872,6 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer(name, 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
Label exec_mode_initialized;
- int frame_size_in_words;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
@@ -2887,6 +2883,9 @@ void SharedRuntime::generate_deopt_blob() {
const Register exec_mode_reg = R21_tmp1;
const address start = __ pc();
+ int exception_offset = 0;
+ int exception_in_tls_offset = 0;
+ int reexecute_offset = 0;
#if defined(COMPILER1) || defined(COMPILER2)
// --------------------------------------------------------------------------
@@ -2926,7 +2925,7 @@ void SharedRuntime::generate_deopt_blob() {
// - R3_ARG1: exception oop
// - R4_ARG2: exception pc
- int exception_offset = __ pc() - start;
+ exception_offset = __ pc() - start;
BLOCK_COMMENT("Prolog for exception case");
@@ -2937,7 +2936,7 @@ void SharedRuntime::generate_deopt_blob() {
__ std(R4_ARG2, _abi0(lr), R1_SP);
// Vanilla deoptimization with an exception pending in exception_oop.
- int exception_in_tls_offset = __ pc() - start;
+ exception_in_tls_offset = __ pc() - start;
// Push the "unpack frame".
// Save everything in sight.
@@ -2950,8 +2949,6 @@ void SharedRuntime::generate_deopt_blob() {
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
// fall through
-
- int reexecute_offset = 0;
#ifdef COMPILER1
__ b(exec_mode_initialized);
@@ -3069,11 +3066,12 @@ void SharedRuntime::generate_deopt_blob() {
// Return to the interpreter entry point.
__ blr();
- __ flush();
-#else // COMPILER2
+#else // !defined(COMPILER1) && !defined(COMPILER2)
__ unimplemented("deopt blob needed only with compiler");
- int exception_offset = __ pc() - start;
-#endif // COMPILER2
+#endif
+
+ // Make sure all code is generated
+ __ flush();
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
reexecute_offset, first_frame_size_in_bytes / wordSize);
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 8d61ba1b2d7..8a3af748fa1 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;
@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index e77a2067e89..63e2fd015d7 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -1041,31 +1041,10 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
-void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done) {
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- // See if the receiver is receiver[n].
- __ ld(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
- __ bne(recv, t1, next_test);
- Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
- __ increment(data_addr, DataLayout::counter_increment);
- __ j(*update_done);
- __ bind(next_test);
- }
-
- // Didn't find receiver; find next empty slot and fill it in
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
- __ ld(t1, recv_addr);
- __ bnez(t1, next_test);
- __ sd(recv, recv_addr);
- __ mv(t1, DataLayout::counter_increment);
- __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
- __ j(*update_done);
- __ bind(next_test);
- }
+void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv) {
+ int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
+ __ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
@@ -1139,14 +1118,9 @@ void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Regist
__ j(*obj_is_null);
__ bind(not_null);
- Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
- type_profile_helper(mdo, md, data, recv, &update_done);
- Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
- __ increment(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
+ type_profile_helper(mdo, md, data, recv);
}
void LIR_Assembler::typecheck_loaded(LIR_OpTypeCheck *op, ciKlass* k, Register k_RInfo) {
@@ -1554,11 +1528,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
- // NOTE: we should probably put a lock around this search to
- // avoid collisions by concurrent compilations
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
- uint i;
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
+ for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@@ -1566,32 +1537,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
-
- // Receiver type not found in profile data; select an empty slot
- // Note that this is less efficient than it should be because it
- // always does a write to the receiver part of the
- // VirtualCallData rather than just the first time
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
- ciKlass* receiver = vc_data->receiver(i);
- if (receiver == nullptr) {
- Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
- __ mov_metadata(t1, known_klass->constant_encoding());
- __ sd(t1, recv_addr);
- Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
- __ increment(data_addr, DataLayout::counter_increment);
- return;
- }
- }
+ // Receiver type is not found in profile data.
+ // Fall back to runtime helper to handle the rest at runtime.
+ __ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
- Label update_done;
- type_profile_helper(mdo, md, data, recv, &update_done);
- // Receiver did not match any saved receiver and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- __ increment(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
}
+ type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ increment(counter_addr, DataLayout::counter_increment);
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
index 1e466e90d37..90b6b3ee4f4 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
@@ -54,9 +54,8 @@ private:
Address stack_slot_address(int index, uint shift, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
- void type_profile_helper(Register mdo,
- ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done);
+ void type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv);
void casw(Register addr, Register newval, Register cmpval);
void caswu(Register addr, Register newval, Register cmpval);
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index 824ea872935..72a90ddde1f 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,7 +30,9 @@
#include "opto/intrinsicnode.hpp"
#include "opto/output.hpp"
#include "opto/subnode.hpp"
+#include "runtime/objectMonitorTable.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef PRODUCT
@@ -123,35 +125,52 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box,
if (!UseObjectMonitorTable) {
assert(tmp1_monitor == tmp1_mark, "should be the same here");
} else {
+ const Register tmp2_hash = tmp2;
+ const Register tmp3_bucket = tmp3;
Label monitor_found;
- // Load cache address
- la(tmp3_t, Address(xthread, JavaThread::om_cache_oops_offset()));
+ // Save the mark, we might need it to extract the hash.
+ mv(tmp2_hash, tmp1_mark);
- const int num_unrolled = 2;
+ // Look for the monitor in the om_cache.
+
+ ByteSize cache_offset = JavaThread::om_cache_oops_offset();
+ ByteSize monitor_offset = OMCache::oop_to_monitor_difference();
+ const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
- ld(tmp1, Address(tmp3_t));
- beq(obj, tmp1, monitor_found);
- add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
+ ld(tmp1_monitor, Address(xthread, cache_offset + monitor_offset));
+ ld(tmp4, Address(xthread, cache_offset));
+ beq(obj, tmp4, monitor_found);
+ cache_offset = cache_offset + OMCache::oop_to_oop_difference();
}
- Label loop;
+ // Look for the monitor in the table.
- // Search for obj in cache.
- bind(loop);
+ // Get the hash code.
+ srli(tmp2_hash, tmp2_hash, markWord::hash_shift);
- // Check for match.
- ld(tmp1, Address(tmp3_t));
- beq(obj, tmp1, monitor_found);
+ // Get the table and calculate the bucket's address.
+ la(tmp3_t, ExternalAddress(ObjectMonitorTable::current_table_address()));
+ ld(tmp3_t, Address(tmp3_t));
+ ld(tmp1, Address(tmp3_t, ObjectMonitorTable::table_capacity_mask_offset()));
+ andr(tmp2_hash, tmp2_hash, tmp1);
+ ld(tmp3_t, Address(tmp3_t, ObjectMonitorTable::table_buckets_offset()));
- // Search until null encountered, guaranteed _null_sentinel at end.
- add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
- bnez(tmp1, loop);
- // Cache Miss. Take the slowpath.
- j(slow_path);
+ // Read the monitor from the bucket.
+ shadd(tmp3_bucket, tmp2_hash, tmp3_t, tmp4, LogBytesPerWord);
+ ld(tmp1_monitor, Address(tmp3_bucket));
+
+ // Check if the monitor in the bucket is special (empty, tombstone or removed).
+ mv(tmp2, ObjectMonitorTable::SpecialPointerValues::below_is_special);
+ bltu(tmp1_monitor, tmp2, slow_path);
+
+ // Check if object matches.
+ ld(tmp3, Address(tmp1_monitor, ObjectMonitor::object_offset()));
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path);
+ bne(tmp3, obj, slow_path);
bind(monitor_found);
- ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference()));
}
const Register tmp2_owner_addr = tmp2;
@@ -2813,10 +2832,14 @@ void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Regist
// Intrinsic for
//
-// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
-// return the number of characters copied.
-// - java/lang/StringUTF16.compress
-// return index of non-latin1 character if copy fails, otherwise 'len'.
+// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
+// Encodes char[] to byte[] in ISO-8859-1
+//
+// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
+// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
+//
+// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
+// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied. A successful
// copy will complete with the post-condition: 'res' == 'len', while an
diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
index f5916000890..aeb9df06de6 100644
--- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -369,6 +369,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
+void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Load the oop from the weak handle.
+ __ ld(obj, Address(obj));
+}
+
#undef __
#define __ _masm->
diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp
index e50fa1dae36..bbb2a5af824 100644
--- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -110,6 +110,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
+ Register tmp, Label& slow_path);
#endif // COMPILER2
};
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
index dd6c8556307..8d530d15ee5 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -88,26 +89,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
- }
-}
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -376,21 +367,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
- satb_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- xthread /* thread */,
- tmp1 /* tmp1 */,
- tmp2 /* tmp2 */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ xthread /* thread */,
+ tmp1 /* tmp1 */,
+ tmp2 /* tmp2 */,
+ true /* tosca_live */,
+ true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
- assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
+ assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@@ -413,13 +404,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- if (!on_oop) {
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
- // flatten object address if needed
+ // Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@@ -428,20 +419,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
- shenandoah_write_barrier_pre(masm,
- tmp3 /* obj */,
- tmp2 /* pre_val */,
- xthread /* thread */,
- tmp1 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
+ bool storing_non_null = (val != noreg);
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp3 /* obj */,
+ tmp2 /* pre_val */,
+ xthread /* thread */,
+ tmp1 /* tmp */,
+ t0 /* tmp2 */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
- if (needs_post_barrier) {
- store_check(masm, tmp3);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp3);
}
}
@@ -465,6 +462,30 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
+#ifdef COMPILER2
+void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj,
+ Register tmp, Label& slow_path) {
+ assert_different_registers(obj, tmp);
+
+ Label done;
+
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the reference is null, and if it is, take the fast path.
+ __ beqz(obj, done);
+
+ Address gc_state(xthread, ShenandoahThreadLocalData::gc_state_offset());
+ __ lbu(tmp, gc_state);
+
+ // Check if the heap is under weak-reference/roots processing, in
+ // which case we need to take the slow path.
+ __ test_bit(tmp, tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS);
+ __ bnez(tmp, slow_path);
+ __ bind(done);
+}
+#endif
+
// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
index c8a7c35fb83..e35e09c93da 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -41,23 +42,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call);
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
@@ -91,7 +85,9 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
-
+#ifdef COMPILER2
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
+#endif
void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
Assembler::Aqrl acquire, Assembler::Aqrl release, bool is_cae, Register result);
};
diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
index 09dea62b6d1..163271a2f11 100644
--- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -602,6 +602,27 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
+#ifdef COMPILER2
+void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_weak_handle_in_c2 {");
+
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
+
+ // Check if the oop is bad, in which case we need to take the slow path.
+ __ relocate(barrier_Relocation::spec(), [&] {
+ __ li16u(tmp, barrier_Relocation::unpatched);
+ }, ZBarrierRelocationFormatMarkBadMask);
+ __ andr(tmp, obj, tmp);
+ __ bnez(tmp, slow_path);
+
+ // Oop is okay, so we uncolor it.
+ __ srli(obj, obj, ZPointerLoadShift);
+
+ BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_weak_handle_in_c2");
+}
+#endif
+
static uint16_t patch_barrier_relocation_value(int format) {
switch (format) {
case ZBarrierRelocationFormatLoadBadMask:
diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp
index 190d81acd0c..648cb3bf63d 100644
--- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -170,6 +170,10 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
+ void try_resolve_weak_handle_in_c2(MacroAssembler* masm,
+ Register obj,
+ Register tmp,
+ Label& slow_path);
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
diff --git a/src/hotspot/cpu/riscv/globals_riscv.hpp b/src/hotspot/cpu/riscv/globals_riscv.hpp
index 390ed2daeb9..21b119266e2 100644
--- a/src/hotspot/cpu/riscv/globals_riscv.hpp
+++ b/src/hotspot/cpu/riscv/globals_riscv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
-define_pd_global(intx, CodeEntryAlignment, 64);
+define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
#define DEFAULT_STACK_YELLOW_PAGES (2)
diff --git a/src/hotspot/cpu/riscv/icache_riscv.cpp b/src/hotspot/cpu/riscv/icache_riscv.cpp
index 258bc665770..20de2dbb2ad 100644
--- a/src/hotspot/cpu/riscv/icache_riscv.cpp
+++ b/src/hotspot/cpu/riscv/icache_riscv.cpp
@@ -39,7 +39,8 @@ static int icache_flush(address addr, int lines, int magic) {
// We need to make sure stores happens before the I/D cache synchronization.
__asm__ volatile("fence rw, rw" : : : "memory");
- RiscvFlushIcache::flush((uintptr_t)addr, ((uintptr_t)lines) << ICache::log2_line_size);
+ uintptr_t end = (uintptr_t)addr + ((uintptr_t)lines << ICache::log2_line_size);
+ RiscvFlushIcache::flush((uintptr_t)addr, end);
return magic;
}
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 189c7c93d07..744590bec2b 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -237,15 +237,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
-// x12, x15
+// x12
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != x10, "x10 holds superklass");
assert(Rsub_klass != x12, "x12 holds 2ndary super array length");
- assert(Rsub_klass != x15, "x15 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
- profile_typecheck(x12, Rsub_klass, x15); // blows x12, reloads x15
+ profile_typecheck(x12, Rsub_klass); // blows x12
// Do the check.
check_klass_subtype(Rsub_klass, x10, x12, ok_is_subtype); // blows x12
@@ -1042,7 +1041,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
- Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1060,7 +1058,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
- record_klass_in_profile(receiver, mdp, reg2);
+ profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@@ -1072,153 +1070,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
-// This routine creates a state machine for updating the multi-row
-// type profile at a virtual call site (or other type-sensitive bytecode).
-// The machine visits each row (of receiver/count) until the receiver type
-// is found, or until it runs out of rows. At the same time, it remembers
-// the location of the first empty row. (An empty row records null for its
-// receiver, and can be allocated for a newly-observed receiver type.)
-// Because there are two degrees of freedom in the state, a simple linear
-// search will not work; it must be a decision tree. Hence this helper
-// function is recursive, to generate the required tree structured code.
-// It's the interpreter, so we are trading off code space for speed.
-// See below for example code.
-void InterpreterMacroAssembler::record_klass_in_profile_helper(
- Register receiver, Register mdp,
- Register reg2, Label& done) {
- if (TypeProfileWidth == 0) {
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- } else {
- record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
- &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
- }
-}
-
-void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
- int last_row = total_rows - 1;
- assert(start_row <= last_row, "must be work left to do");
- // Test this row for both the item and for null.
- // Take any of three different outcomes:
- // 1. found item => increment count and goto done
- // 2. found null => keep looking for case 1, maybe allocate this cell
- // 3. found something else => keep looking for cases 1 and 2
- // Case 3 is handled by a recursive call.
- for (int row = start_row; row <= last_row; row++) {
- Label next_test;
- bool test_for_null_also = (row == start_row);
-
- // See if the item is item[n].
- int item_offset = in_bytes(item_offset_fn(row));
- test_mdp_data_at(mdp, item_offset, item,
- (test_for_null_also ? reg2 : noreg),
- next_test);
- // (Reg2 now contains the item from the CallData.)
-
- // The item is item[n]. Increment count[n].
- int count_offset = in_bytes(item_count_offset_fn(row));
- increment_mdp_data_at(mdp, count_offset);
- j(done);
- bind(next_test);
-
- if (test_for_null_also) {
- Label found_null;
- // Failed the equality check on item[n]... Test for null.
- if (start_row == last_row) {
- // The only thing left to do is handle the null case.
- beqz(reg2, found_null);
- // Item did not match any saved item and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- j(done);
- bind(found_null);
- break;
- }
- // Since null is rare, make it be the branch-taken case.
- beqz(reg2, found_null);
-
- // Put all the "Case 3" tests here.
- record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
- item_offset_fn, item_count_offset_fn);
-
- // Found a null. Keep searching for a matching item,
- // but remember that this is an empty (unused) slot.
- bind(found_null);
- }
- }
-
- // In the fall-through case, we found no matching item, but we
- // observed the item[start_row] is null.
- // Fill in the item field and increment the count.
- int item_offset = in_bytes(item_offset_fn(start_row));
- set_mdp_data_at(mdp, item_offset, item);
- int count_offset = in_bytes(item_count_offset_fn(start_row));
- mv(reg2, DataLayout::counter_increment);
- set_mdp_data_at(mdp, count_offset, reg2);
- if (start_row > 0) {
- j(done);
- }
-}
-
-// Example state machine code for three profile rows:
-// # main copy of decision tree, rooted at row[1]
-// if (row[0].rec == rec) then [
-// row[0].incr()
-// goto done
-// ]
-// if (row[0].rec != nullptr) then [
-// # inner copy of decision tree, rooted at row[1]
-// if (row[1].rec == rec) then [
-// row[1].incr()
-// goto done
-// ]
-// if (row[1].rec != nullptr) then [
-// # degenerate decision tree, rooted at row[2]
-// if (row[2].rec == rec) then [
-// row[2].incr()
-// goto done
-// ]
-// if (row[2].rec != nullptr) then [
-// count.incr()
-// goto done
-// ] # overflow
-// row[2].init(rec)
-// goto done
-// ] else [
-// # remember row[1] is empty
-// if (row[2].rec == rec) then [
-// row[2].incr()
-// goto done
-// ]
-// row[1].init(rec)
-// goto done
-// ]
-// else [
-// # remember row[0] is empty
-// if (row[1].rec == rec) then [
-// row[1].incr()
-// goto done
-// ]
-// if (row[2].rec == rec) then [
-// row[2].incr()
-// goto done
-// ]
-// row[0].init(rec)
-// goto done
-// ]
-// done:
-
-void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
- Register mdp, Register reg2) {
- assert(ProfileInterpreter, "must be profiling");
- Label done;
-
- record_klass_in_profile_helper(receiver, mdp, reg2, done);
-
- bind(done);
-}
-
void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1274,7 +1125,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
-void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
+void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1287,7 +1138,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
- record_klass_in_profile(klass, mdp, reg2);
+ profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
index a9df09d656a..59cc76b022f 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
@@ -262,14 +262,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
- void record_klass_in_profile(Register receiver, Register mdp,
- Register reg2);
- void record_klass_in_profile_helper(Register receiver, Register mdp,
- Register reg2, Label& done);
- void record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
-
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@@ -283,11 +275,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
- Register t1,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
- void profile_typecheck(Register mdp, Register klass, Register temp);
+ void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 43b17a13c20..4f5e7afc166 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -543,6 +543,160 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
BLOCK_COMMENT("} verify_oop");
}
+// Handle the receiver type profile update given the "recv" klass.
+//
+// Normally updates the ReceiverTypeData (RTD) that starts at "mdp" + "mdp_offset".
+// If there are no matching or claimable receiver entries in RTD, updates
+// the polymorphic counter.
+//
+// This code is expected to be run by either the interpreter or JIT-ed code, without
+// extra synchronization. For safety, receiver cells are claimed atomically, which
+// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
+// counter updates are not atomic.
+//
+void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
+ assert_different_registers(recv, mdp, t0, t1);
+
+ int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
+ int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
+ int poly_count_offset = in_bytes(CounterData::count_offset());
+ int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
+ int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
+
+ // Adjust for MDP offsets. Slots are pointer-sized, so is the global offset.
+ base_receiver_offset += mdp_offset;
+ end_receiver_offset += mdp_offset;
+ poly_count_offset += mdp_offset;
+
+#ifdef ASSERT
+ // We are about to walk the MDO slots without asking for offsets.
+ // Check that our math hits all the right spots.
+ for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
+ int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
+ int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
+ int offset = base_receiver_offset + receiver_step*c;
+ int count_offset = offset + receiver_to_count_step;
+ assert(offset == real_recv_offset, "receiver slot math");
+ assert(count_offset == real_count_offset, "receiver count math");
+ }
+ int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
+ assert(poly_count_offset == real_poly_count_offset, "poly counter math");
+#endif
+
+ // Corner case: no profile table. Increment poly counter and exit.
+ if (ReceiverTypeData::row_limit() == 0) {
+ increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
+ return;
+ }
+
+ Register offset = t1;
+
+ Label L_loop_search_receiver, L_loop_search_empty;
+ Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
+
+ // The code here recognizes three major cases:
+ // A. Fastest: receiver found in the table
+ // B. Fast: no receiver in the table, and the table is full
+ // C. Slow: no receiver in the table, free slots in the table
+ //
+ // The case A performance is most important, as perfectly-behaved code would end up
+ // there, especially with larger TypeProfileWidth. The case B performance is
+ // important as well, this is where bulk of code would land for normally megamorphic
+ // cases. The case C performance is not essential, its job is to deal with installation
+ // races, we optimize for code density instead. Case C needs to make sure that receiver
+ // rows are only claimed once. This makes sure we never overwrite a row for another
+ // receiver and never duplicate the receivers in the list, making profile type-accurate.
+ //
+ // It is very tempting to handle these cases in a single loop, and claim the first slot
+ // without checking the rest of the table. But, profiling code should tolerate free slots
+ // in the table, as class unloading can clear them. After such cleanup, the receiver
+ // we need might be _after_ the free slot. Therefore, we need to let at least a full scan
+ // complete before trying to install new slots. Splitting the code into several tight
+ // loops also helpfully optimizes for cases A and B.
+ //
+ // This code is effectively:
+ //
+ // restart:
+ // // Fastest: receiver is already installed
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == recv) goto found_recv(i);
+ // }
+ //
+ // // Fast: no receiver, but profile is full
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == null) goto found_null(i);
+ // }
+ // goto polymorphic
+ //
+ // // Slow: try to install receiver
+ // found_null(i):
+ // CAS(&receiver(i), null, recv);
+ // goto restart
+ //
+ // polymorphic:
+ // count++;
+ // return
+ //
+ // found_recv(i):
+ // *receiver_count(i)++
+ //
+
+ bind(L_restart);
+
+ // Fastest: receiver is already installed
+ mv(offset, base_receiver_offset);
+ bind(L_loop_search_receiver);
+ add(t0, mdp, offset);
+ ld(t0, Address(t0));
+ beq(recv, t0, L_found_recv);
+ add(offset, offset, receiver_step);
+ sub(t0, offset, end_receiver_offset);
+ bnez(t0, L_loop_search_receiver);
+
+ // Fast: no receiver, but profile is full
+ mv(offset, base_receiver_offset);
+ bind(L_loop_search_empty);
+ add(t0, mdp, offset);
+ ld(t0, Address(t0));
+ beqz(t0, L_found_empty);
+ add(offset, offset, receiver_step);
+ sub(t0, offset, end_receiver_offset);
+ bnez(t0, L_loop_search_empty);
+ j(L_polymorphic);
+
+ // Slow: try to install receiver
+ bind(L_found_empty);
+
+ // Atomically swing receiver slot: null -> recv.
+ //
+ // The update uses CAS, which clobbers t0. Therefore, t1
+ // is used to hold the destination address. This is safe because the
+ // offset is no longer needed after the address is computed.
+ add(t1, mdp, offset);
+ weak_cmpxchg(/*addr*/ t1, /*expected*/ zr, /*new*/ recv, Assembler::int64,
+ /*acquire*/ Assembler::relaxed, /*release*/ Assembler::relaxed, /*result*/ t0);
+
+ // CAS success means the slot now has the receiver we want. CAS failure means
+ // something had claimed the slot concurrently: it can be the same receiver we want,
+ // or something else. Since this is a slow path, we can optimize for code density,
+ // and just restart the search from the beginning.
+ j(L_restart);
+
+ // Counter updates:
+ // Increment polymorphic counter instead of receiver slot.
+ bind(L_polymorphic);
+ mv(offset, poly_count_offset);
+ j(L_count_update);
+
+ // Found a receiver, convert its slot offset to corresponding count offset.
+ bind(L_found_recv);
+ add(offset, offset, receiver_to_count_step);
+
+ bind(L_count_update);
+ add(t1, mdp, offset);
+ increment(Address(t1), DataLayout::counter_increment);
+}
+
void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
if (!VerifyOops) {
return;
@@ -5110,9 +5264,8 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
- CardTable::CardValue* byte_map_base =
- ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
- mv(reg, (uint64_t)byte_map_base);
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ mv(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index 3b021388fa5..f5e985c28a2 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -390,6 +390,8 @@ class MacroAssembler: public Assembler {
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+ void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
+
// only if +VerifyOops
void _verify_oop(Register reg, const char* s, const char* file, int line);
void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index eeb6fad1b59..8c343f6ab2b 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not ovlerlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
- return UseRVV;
+ return UseRVV && size > 0;
}
// ---------------------------------------------------------------------------
@@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
- __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
- __ beqz(t1, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
+ __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
+ __ beqz(t1, L_skip_barrier); // non-static
- __ load_method_holder(t1, xmethod);
- __ clinit_barrier(t1, t0, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(t1, xmethod);
+ __ clinit_barrier(t1, t0, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index ca41583e4bc..5cc725e3af4 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
- __ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
- __ ld(x12, at_tos_p1()); // index
+ __ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
- __ ld(x10, iaddress(x12, x10, _masm));
+ __ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
- __ sd(x10, iaddress(x12, t0, _masm));
+ __ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
- __ ld(x10, iaddress(x12, t0, _masm));
+ __ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
- __ sd(x10, iaddress(x12, t0, _masm));
+ __ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {
@@ -2192,7 +2191,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2243,8 +2243,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);
@@ -3279,7 +3279,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(x10, recv);
// profile this call
- __ profile_virtual_call(x10, xlocals, x13);
+ __ profile_virtual_call(x10, xlocals);
// get target Method & entry point
__ lookup_virtual_method(x10, index, method);
@@ -3406,7 +3406,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
- __ profile_virtual_call(x13, x30, x9);
+ __ profile_virtual_call(x13, x30);
// Get declaring interface class from method, and itable index
__ load_method_holder(x10, xmethod);
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index 22f19c4f5ea..36f0864da0b 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
(unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
}
- if (FLAG_IS_DEFAULT(AlignVector)) {
- FLAG_SET_DEFAULT(AlignVector,
- unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
- }
-
#ifdef __riscv_ztso
// Hotspot is compiled with TSO support, it will only run on hardware which
// supports Ztso
@@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
}
}
+ if (FLAG_IS_DEFAULT(AlignVector)) {
+ FLAG_SET_DEFAULT(AlignVector,
+ unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
+ }
+
// NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
// as there are extra checks inside it which could disable UseRVV
// in some situations.
diff --git a/src/hotspot/cpu/s390/c2_globals_s390.hpp b/src/hotspot/cpu/s390/c2_globals_s390.hpp
index 431a36cda07..125b317588d 100644
--- a/src/hotspot/cpu/s390/c2_globals_s390.hpp
+++ b/src/hotspot/cpu/s390/c2_globals_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -44,7 +44,7 @@ define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 4);
-define_pd_global(intx, FreqInlineSize, 175);
+define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 12000);
diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp
index 8501a0cb346..43f5d80250e 100644
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -29,9 +29,6 @@
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
-#ifdef COMPILER2
-#include "opto/matcher.hpp"
-#endif
// ----------------------------------------------------------------------------
@@ -39,7 +36,6 @@
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
-#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == nullptr) {
@@ -55,7 +51,7 @@ address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address ma
__ relocate(static_stub_Relocation::spec(mark));
AddressLiteral meta = __ allocate_metadata_address(nullptr);
- bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);
+ bool success = __ load_const_from_toc(Z_inline_cache, meta);
__ set_inst_mark();
AddressLiteral a((address)-1);
@@ -67,10 +63,6 @@ address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address ma
__ z_br(Z_R1);
__ end_a_stub(); // Update current stubs pointer and restore insts_end.
return stub;
-#else
- ShouldNotReachHere();
- return nullptr;
-#endif
}
#undef __
diff --git a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
index 272136fc28c..617bc7cd00c 100644
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -129,6 +129,57 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
}
}
+static void generate_post_barrier(MacroAssembler* masm,
+ const Register store_addr,
+ const Register new_val,
+ const Register thread,
+ const Register tmp1,
+ const Register tmp2,
+ Label& done,
+ bool new_val_may_be_null) {
+
+ __ block_comment("generate_post_barrier {");
+
+ assert(thread == Z_thread, "must be");
+ assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
+
+ // Does store cross heap regions?
+ if (VM_Version::has_DistinctOpnds()) {
+ __ z_xgrk(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
+ } else {
+ __ z_lgr(tmp1, store_addr);
+ __ z_xgr(tmp1, new_val);
+ }
+ __ z_srag(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
+ __ branch_optimized(Assembler::bcondEqual, done);
+
+ // Crosses regions, storing null?
+ if (new_val_may_be_null) {
+ __ z_ltgr(new_val, new_val);
+ __ z_bre(done);
+ } else {
+#ifdef ASSERT
+ __ z_ltgr(new_val, new_val);
+ __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
+#endif
+ }
+
+ __ z_srag(tmp1, store_addr, CardTable::card_shift());
+
+ Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
+ __ z_alg(tmp1, card_table_addr); // tmp1 := card address
+
+ if(UseCondCardMark) {
+ __ z_cli(0, tmp1, G1CardTable::clean_card_val());
+ __ branch_optimized(Assembler::bcondNotEqual, done);
+ }
+
+ static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
+ __ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
+
+ __ block_comment("} generate_post_barrier");
+}
+
#if defined(COMPILER2)
#undef __
@@ -204,57 +255,6 @@ void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
BLOCK_COMMENT("} generate_c2_pre_barrier_stub");
}
-static void generate_post_barrier(MacroAssembler* masm,
- const Register store_addr,
- const Register new_val,
- const Register thread,
- const Register tmp1,
- const Register tmp2,
- Label& done,
- bool new_val_may_be_null) {
-
- __ block_comment("generate_post_barrier {");
-
- assert(thread == Z_thread, "must be");
- assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
-
- // Does store cross heap regions?
- if (VM_Version::has_DistinctOpnds()) {
- __ z_xgrk(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
- } else {
- __ z_lgr(tmp1, store_addr);
- __ z_xgr(tmp1, new_val);
- }
- __ z_srag(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
- __ branch_optimized(Assembler::bcondEqual, done);
-
- // Crosses regions, storing null?
- if (new_val_may_be_null) {
- __ z_ltgr(new_val, new_val);
- __ z_bre(done);
- } else {
-#ifdef ASSERT
- __ z_ltgr(new_val, new_val);
- __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
-#endif
- }
-
- __ z_srag(tmp1, store_addr, CardTable::card_shift());
-
- Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
- __ z_alg(tmp1, card_table_addr); // tmp1 := card address
-
- if(UseCondCardMark) {
- __ z_cli(0, tmp1, G1CardTable::clean_card_val());
- __ branch_optimized(Assembler::bcondNotEqual, done);
- }
-
- static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
- __ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
-
- __ block_comment("} generate_post_barrier");
-}
-
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
Register store_addr,
Register new_val,
diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
index c6f5a4e119c..7617c7a49e8 100644
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -206,6 +206,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
+void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
+ // Load the oop from the weak handle.
+ __ z_lg(obj, Address(obj));
+}
+
#undef __
#define __ _masm->
diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp
index 65db915b672..d5682450414 100644
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -65,6 +65,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg) const;
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
+ Register tmp, Label& slow_path);
#endif // COMPILER2
static const int OFFSET_TO_PATCHABLE_DATA_INSTRUCTION = 6 + 6 + 6; // iihf(6) + iilf(6) + lg(6)
diff --git a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
index a0da6ebe682..9bb7f63ff31 100644
--- a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
@@ -83,8 +83,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
- CardTableBarrierSet* ctbs = barrier_set_cast(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@@ -105,7 +104,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
- __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
+ __ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@@ -179,13 +178,12 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
- CardTableBarrierSet* ctbs = barrier_set_cast(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
- __ load_absolute_address(tmp, (address)ct->byte_map_base());
+ __ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}
diff --git a/src/hotspot/cpu/s390/globals_s390.hpp b/src/hotspot/cpu/s390/globals_s390.hpp
index 07987ea3469..d110443adf8 100644
--- a/src/hotspot/cpu/s390/globals_s390.hpp
+++ b/src/hotspot/cpu/s390/globals_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -43,7 +43,7 @@ define_pd_global(size_t, CodeCacheSegmentSize, 256);
// Ideally, this is 256 (cache line size). This keeps code end data
// on separate lines. But we reduced it to 64 since 256 increased
// code size significantly by padding nops between IVC and second UEP.
-define_pd_global(intx, CodeEntryAlignment, 64);
+define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 2);
define_pd_global(intx, InlineSmallCode, 2000);
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index f35e18c7398..78779a9098a 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* Copyright 2024 IBM Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -44,6 +44,7 @@
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
+#include "runtime/objectMonitorTable.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
@@ -6372,45 +6373,55 @@ void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Regis
if (!UseObjectMonitorTable) {
assert(tmp1_monitor == mark, "should be the same here");
} else {
+ const Register tmp1_bucket = tmp1;
+ const Register hash = Z_R0_scratch;
NearLabel monitor_found;
- // load cache address
- z_la(tmp1, Address(Z_thread, JavaThread::om_cache_oops_offset()));
+ // Save the mark, we might need it to extract the hash.
+ z_lgr(hash, mark);
- const int num_unrolled = 2;
+ // Look for the monitor in the om_cache.
+
+ ByteSize cache_offset = JavaThread::om_cache_oops_offset();
+ ByteSize monitor_offset = OMCache::oop_to_monitor_difference();
+ const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
- z_cg(obj, Address(tmp1));
+ z_lg(tmp1_monitor, Address(Z_thread, cache_offset + monitor_offset));
+ z_cg(obj, Address(Z_thread, cache_offset));
z_bre(monitor_found);
- add2reg(tmp1, in_bytes(OMCache::oop_to_oop_difference()));
+ cache_offset = cache_offset + OMCache::oop_to_oop_difference();
}
- NearLabel loop;
- // Search for obj in cache
+ // Get the hash code.
+ z_srlg(hash, hash, markWord::hash_shift);
- bind(loop);
+ // Get the table and calculate the bucket's address.
+ load_const_optimized(tmp2, ObjectMonitorTable::current_table_address());
+ z_lg(tmp2, Address(tmp2));
+ z_ng(hash, Address(tmp2, ObjectMonitorTable::table_capacity_mask_offset()));
+ z_lg(tmp1_bucket, Address(tmp2, ObjectMonitorTable::table_buckets_offset()));
+ z_sllg(hash, hash, LogBytesPerWord);
+ z_agr(tmp1_bucket, hash);
- // check for match.
- z_cg(obj, Address(tmp1));
- z_bre(monitor_found);
+ // Read the monitor from the bucket.
+ z_lg(tmp1_monitor, Address(tmp1_bucket));
- // search until null encountered, guaranteed _null_sentinel at end.
- add2reg(tmp1, in_bytes(OMCache::oop_to_oop_difference()));
- z_cghsi(0, tmp1, 0);
- z_brne(loop); // if not EQ to 0, go for another loop
+ // Check if the monitor in the bucket is special (empty, tombstone or removed).
+ z_clgfi(tmp1_monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
+ z_brl(slow_path);
- // we reached to the end, cache miss
- z_ltgr(obj, obj); // set CC to NE
- z_bru(slow_path);
+ // Check if object matches.
+ z_lg(tmp2, Address(tmp1_monitor, ObjectMonitor::object_offset()));
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs_asm->try_resolve_weak_handle_in_c2(this, tmp2, Z_R0_scratch, slow_path);
+ z_cgr(obj, tmp2);
+ z_brne(slow_path);
- // cache hit
bind(monitor_found);
- z_lg(tmp1_monitor, Address(tmp1, OMCache::oop_to_monitor_difference()));
}
NearLabel monitor_locked;
// lock the monitor
- // mark contains the tagged ObjectMonitor*.
- const Register tagged_monitor = mark;
const Register zero = tmp2;
const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value));
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 5b6f7dcd984..00a830a80cd 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
- __ z_bfalse(L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
+ __ z_bfalse(L_skip_barrier); // non-static
- Register klass = Z_R11;
- __ load_method_holder(klass, Z_method);
- __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = Z_R11;
+ __ load_method_holder(klass, Z_method);
+ __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
- __ z_br(klass);
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
+ __ z_br(klass);
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 4e8fdf275e4..647915ef4fa 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index cbc5c6988d4..3c8defe62d9 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -7320,6 +7320,25 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
emit_int16(0x2E, (0xC0 | encode));
}
+void Assembler::vucomxsd(XMMRegister dst, Address src) {
+ assert(VM_Version::supports_avx10_2(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
+ attributes.set_is_evex_instruction();
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
+ emit_int8(0x2E);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vucomxsd(XMMRegister dst, XMMRegister src) {
+ assert(VM_Version::supports_avx10_2(), "");
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
+ emit_int16(0x2E, (0xC0 | encode));
+}
+
void Assembler::ucomiss(XMMRegister dst, Address src) {
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -7335,6 +7354,25 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
emit_int16(0x2E, (0xC0 | encode));
}
+void Assembler::vucomxss(XMMRegister dst, Address src) {
+ assert(VM_Version::supports_avx10_2(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
+ attributes.set_is_evex_instruction();
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
+ emit_int8(0x2E);
+ emit_operand(dst, src, 0);
+}
+
+void Assembler::vucomxss(XMMRegister dst, XMMRegister src) {
+ assert(VM_Version::supports_avx10_2(), "");
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
+ emit_int16(0x2E, (0xC0 | encode));
+}
+
void Assembler::xabort(int8_t imm8) {
emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (imm8 & 0xFF));
}
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index 26c57fc2d80..97854f712cf 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2331,10 +2331,14 @@ private:
// Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void ucomisd(XMMRegister dst, Address src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void vucomxsd(XMMRegister dst, Address src);
+ void vucomxsd(XMMRegister dst, XMMRegister src);
// Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
void ucomiss(XMMRegister dst, Address src);
void ucomiss(XMMRegister dst, XMMRegister src);
+ void vucomxss(XMMRegister dst, Address src);
+ void vucomxss(XMMRegister dst, XMMRegister src);
void xabort(int8_t imm8);
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index 37ee9451405..d9be0fdcc8d 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -32,6 +32,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
+#include "code/aotCodeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
@@ -535,6 +536,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
+#if INCLUDE_CDS
+ if (AOTCodeCache::is_on_for_dump()) {
+ address b = c->as_pointer();
+ if (AOTRuntimeConstants::contains(b)) {
+ __ load_aotrc_address(dest->as_register_lo(), b);
+ break;
+ }
+ }
+#endif
__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
break;
}
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 8fc3d18abb1..a3ccc081b6b 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,9 @@
#include "opto/subnode.hpp"
#include "runtime/globals.hpp"
#include "runtime/objectMonitor.hpp"
+#include "runtime/objectMonitorTable.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -217,7 +219,6 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle
// In the case of failure, the node will branch directly to the
// FailureLabel
-
// obj: object to lock
// box: on-stack box address -- KILLED
// rax: tmp -- KILLED
@@ -286,7 +287,7 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg,
// After successful lock, push object on lock-stack.
movptr(Address(thread, top), obj);
addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
- jmpb(locked);
+ jmp(locked);
}
{ // Handle inflated monitor.
@@ -297,38 +298,49 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg,
if (!UseObjectMonitorTable) {
assert(mark == monitor, "should be the same here");
} else {
- // Uses ObjectMonitorTable. Look for the monitor in the om_cache.
- // Fetch ObjectMonitor* from the cache or take the slow-path.
+ const Register hash = t;
Label monitor_found;
- // Load cache address
- lea(t, Address(thread, JavaThread::om_cache_oops_offset()));
+ // Look for the monitor in the om_cache.
- const int num_unrolled = 2;
+ ByteSize cache_offset = JavaThread::om_cache_oops_offset();
+ ByteSize monitor_offset = OMCache::oop_to_monitor_difference();
+ const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
- cmpptr(obj, Address(t));
+ movptr(monitor, Address(thread, cache_offset + monitor_offset));
+ cmpptr(obj, Address(thread, cache_offset));
jccb(Assembler::equal, monitor_found);
- increment(t, in_bytes(OMCache::oop_to_oop_difference()));
+ cache_offset = cache_offset + OMCache::oop_to_oop_difference();
}
- Label loop;
+ // Look for the monitor in the table.
- // Search for obj in cache.
- bind(loop);
+ // Get the hash code.
+ movptr(hash, Address(obj, oopDesc::mark_offset_in_bytes()));
+ shrq(hash, markWord::hash_shift);
+ andq(hash, markWord::hash_mask);
- // Check for match.
- cmpptr(obj, Address(t));
- jccb(Assembler::equal, monitor_found);
+ // Get the table and calculate the bucket's address.
+ lea(rax_reg, ExternalAddress(ObjectMonitorTable::current_table_address()));
+ movptr(rax_reg, Address(rax_reg));
+ andq(hash, Address(rax_reg, ObjectMonitorTable::table_capacity_mask_offset()));
+ movptr(rax_reg, Address(rax_reg, ObjectMonitorTable::table_buckets_offset()));
- // Search until null encountered, guaranteed _null_sentinel at end.
- cmpptr(Address(t), 1);
- jcc(Assembler::below, slow_path); // 0 check, but with ZF=0 when *t == 0
- increment(t, in_bytes(OMCache::oop_to_oop_difference()));
- jmpb(loop);
+ // Read the monitor from the bucket.
+ movptr(monitor, Address(rax_reg, hash, Address::times_ptr));
+
+ // Check if the monitor in the bucket is special (empty, tombstone or removed)
+ cmpptr(monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
+ jcc(Assembler::below, slow_path);
+
+ // Check if object matches.
+ movptr(rax_reg, Address(monitor, ObjectMonitor::object_offset()));
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs_asm->try_resolve_weak_handle_in_c2(this, rax_reg, slow_path);
+ cmpptr(rax_reg, obj);
+ jcc(Assembler::notEqual, slow_path);
- // Cache hit.
bind(monitor_found);
- movptr(monitor, Address(t, OMCache::oop_to_monitor_difference()));
}
const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value));
const Address recursions_address(monitor, ObjectMonitor::recursions_offset() - monitor_tag);
@@ -487,14 +499,14 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
cmpl(top, in_bytes(JavaThread::lock_stack_base_offset()));
jcc(Assembler::below, check_done);
cmpptr(obj, Address(thread, top));
- jccb(Assembler::notEqual, inflated_check_lock_stack);
+ jcc(Assembler::notEqual, inflated_check_lock_stack);
stop("Fast Unlock lock on stack");
bind(check_done);
if (UseObjectMonitorTable) {
movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
}
testptr(mark, markWord::monitor_value);
- jccb(Assembler::notZero, inflated);
+ jcc(Assembler::notZero, inflated);
stop("Fast Unlock not monitor");
#endif
@@ -519,7 +531,7 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
// Check if recursive.
cmpptr(recursions_address, 0);
- jccb(Assembler::notZero, recursive);
+ jcc(Assembler::notZero, recursive);
// Set owner to null.
// Release to satisfy the JMM
@@ -530,11 +542,11 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
// Check if the entry_list is empty.
cmpptr(entry_list_address, NULL_WORD);
- jccb(Assembler::zero, unlocked); // If so we are done.
+ jcc(Assembler::zero, unlocked); // If so we are done.
// Check if there is a successor.
cmpptr(succ_address, NULL_WORD);
- jccb(Assembler::notZero, unlocked); // If so we are done.
+ jcc(Assembler::notZero, unlocked); // If so we are done.
// Save the monitor pointer in the current thread, so we can try to
// reacquire the lock in SharedRuntime::monitor_exit_helper().
@@ -1046,17 +1058,28 @@ void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst, XMMRegister zero,
Label DONE_LABEL;
+ // Handle special cases +0.0/-0.0 and NaN: if the argument is +0.0/-0.0 or NaN, return it unchanged
+ // If AVX10.2 (or newer) floating point comparison instructions are used, SF=1 for equal and unordered cases
+ // If other floating point comparison instructions are used, ZF=1 for equal and unordered cases
if (opcode == Op_SignumF) {
- ucomiss(dst, zero);
- jcc(Assembler::equal, DONE_LABEL); // handle special case +0.0/-0.0, if argument is +0.0/-0.0, return argument
- jcc(Assembler::parity, DONE_LABEL); // handle special case NaN, if argument NaN, return NaN
+ if (VM_Version::supports_avx10_2()) {
+ vucomxss(dst, zero);
+ jcc(Assembler::negative, DONE_LABEL);
+ } else {
+ ucomiss(dst, zero);
+ jcc(Assembler::equal, DONE_LABEL);
+ }
movflt(dst, one);
jcc(Assembler::above, DONE_LABEL);
xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), noreg);
} else if (opcode == Op_SignumD) {
- ucomisd(dst, zero);
- jcc(Assembler::equal, DONE_LABEL); // handle special case +0.0/-0.0, if argument is +0.0/-0.0, return argument
- jcc(Assembler::parity, DONE_LABEL); // handle special case NaN, if argument NaN, return NaN
+ if (VM_Version::supports_avx10_2()) {
+ vucomxsd(dst, zero);
+ jcc(Assembler::negative, DONE_LABEL);
+ } else {
+ ucomisd(dst, zero);
+ jcc(Assembler::equal, DONE_LABEL);
+ }
movdbl(dst, one);
jcc(Assembler::above, DONE_LABEL);
xorpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), noreg);
diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
index 0951bda0d17..77a149addb5 100644
--- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
+++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
+ // Similarly, when isU is set, the shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
+
if (sizeKnown) {
- __ movl(temp2, 31 + size);
+ __ movl(temp2, (isU ? 30 : 31) + size);
} else {
- __ movl(temp2, 31);
+ __ movl(temp2, isU ? 30 : 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);
diff --git a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
index 34de9403ccf..b20d7b5cd07 100644
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
@@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
+#include "code/aotCodeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
@@ -268,6 +269,16 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}
+#if INCLUDE_CDS
+// Return a register that differs from reg1, reg2, reg3 and reg4.
+
+static Register pick_different_reg(Register reg1, Register reg2 = noreg, Register reg3= noreg, Register reg4 = noreg) {
+ RegSet available = (RegSet::of(rscratch1, rscratch2, rax, rbx) + rdx -
+ RegSet::of(reg1, reg2, reg3, reg4));
+ return *(available.begin());
+}
+#endif // INCLUDE_CDS
+
static void generate_post_barrier(MacroAssembler* masm,
const Register store_addr,
const Register new_val,
@@ -280,10 +291,32 @@ static void generate_post_barrier(MacroAssembler* masm,
Label L_done;
// Does store cross heap regions?
- __ movptr(tmp1, store_addr); // tmp1 := store address
- __ xorptr(tmp1, new_val); // tmp1 := store address ^ new value
- __ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes); // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
- __ jccb(Assembler::equal, L_done);
+#if INCLUDE_CDS
+ // AOT code needs to load the barrier grain shift from the AOT
+ // runtime constants area in the code cache; otherwise we can compile
+ // it as an immediate operand.
+
+ if (AOTCodeCache::is_on_for_dump()) {
+ address grain_shift_addr = AOTRuntimeConstants::grain_shift_address();
+ Register save = pick_different_reg(rcx, tmp1, new_val, store_addr);
+ __ push(save);
+ __ movptr(save, store_addr);
+ __ xorptr(save, new_val);
+ __ push(rcx);
+ __ lea(rcx, ExternalAddress(grain_shift_addr));
+ __ movl(rcx, Address(rcx, 0));
+ __ shrptr(save);
+ __ pop(rcx);
+ __ pop(save);
+ __ jcc(Assembler::equal, L_done);
+ } else
+#endif // INCLUDE_CDS
+ {
+ __ movptr(tmp1, store_addr); // tmp1 := store address
+ __ xorptr(tmp1, new_val); // tmp1 := store address ^ new value
+ __ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes); // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
+ __ jccb(Assembler::equal, L_done);
+ }
// Crosses regions, storing null?
if (new_val_may_be_null) {
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
index 09c5d93dbb3..215dc30f7fd 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -395,6 +395,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
+void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath) {
+ // Load the oop from the weak handle.
+ __ movptr(obj, Address(obj));
+}
+
#undef __
#define __ _masm->
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
index c5bf17c3b4e..6aff29850e3 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
+
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath);
#endif // COMPILER2
};
diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
index 2b91662ddb5..0ea769dd488 100644
--- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
@@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
+#include "code/aotCodeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@@ -95,11 +96,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
- BarrierSet *bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast(bs);
- CardTable* ct = ctbs->card_table();
- intptr_t disp = (intptr_t) ct->byte_map_base();
- SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_loop, L_done;
const Register end = count;
@@ -115,7 +112,15 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
- __ mov64(tmp, disp);
+#if INCLUDE_CDS
+ if (AOTCodeCache::is_on_for_dump()) {
+ __ lea(tmp, ExternalAddress(AOTRuntimeConstants::card_table_base_address()));
+ __ movq(tmp, Address(tmp, 0));
+ } else
+#endif
+ {
+ __ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
+ }
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@@ -125,13 +130,10 @@ __ BIND(L_loop);
__ BIND(L_done);
}
-void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
+void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
- BarrierSet* bs = BarrierSet::barrier_set();
-
- CardTableBarrierSet* ctbs = barrier_set_cast(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
__ shrptr(obj, CardTable::card_shift());
@@ -142,7 +144,14 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
- intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
+ intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
+#if INCLUDE_CDS
+ if (AOTCodeCache::is_on_for_dump()) {
+ __ lea(rscratch, ExternalAddress(AOTRuntimeConstants::card_table_base_address()));
+ __ movq(rscratch, Address(rscratch, 0));
+ card_addr = Address(rscratch, obj, Address::times_1, 0);
+ } else
+#endif
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {
@@ -181,10 +190,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
if (needs_post_barrier) {
// flatten object address if needed
if (!precise || (dst.index() == noreg && dst.disp() == 0)) {
- store_check(masm, dst.base(), dst);
+ store_check(masm, dst.base(), dst, tmp2);
} else {
__ lea(tmp1, dst);
- store_check(masm, tmp1, dst);
+ store_check(masm, tmp1, dst, tmp2);
}
}
}
diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp
index 0a36571c757..201c11062f2 100644
--- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp
@@ -33,7 +33,7 @@ protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {}
- void store_check(MacroAssembler* masm, Register obj, Address dst);
+ void store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp);
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
index 9e321391f6c..67510fac58f 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -174,24 +175,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
- }
-}
-
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -533,18 +524,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
- shenandoah_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- tmp1 /* tmp */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ tmp1 /* tmp */,
+ true /* tosca_live */,
+ true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@@ -575,41 +566,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool as_normal = (decorators & AS_NORMAL) != 0;
- if (on_oop && in_heap) {
- bool needs_pre_barrier = as_normal;
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
+ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
+ return;
+ }
- // flatten object address if needed
- // We do it regardless of precise because we need the registers
- if (dst.index() == noreg && dst.disp() == 0) {
- if (dst.base() != tmp1) {
- __ movptr(tmp1, dst.base());
- }
- } else {
- __ lea(tmp1, dst);
- }
-
- assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
-
- if (needs_pre_barrier) {
- shenandoah_write_barrier_pre(masm /*masm*/,
- tmp1 /* obj */,
- tmp2 /* pre_val */,
- tmp3 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
- }
-
- BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
- if (val != noreg) {
- if (ShenandoahCardBarrier) {
- store_check(masm, tmp1);
- }
+ // Flatten object address right away for simplicity: likely needed by barriers
+ assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
+ if (dst.index() == noreg && dst.disp() == 0) {
+ if (dst.base() != tmp1) {
+ __ movptr(tmp1, dst.base());
}
} else {
- BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
+ __ lea(tmp1, dst);
+ }
+
+ bool storing_non_null = (val != noreg);
+
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp1 /* obj */,
+ tmp2 /* pre_val */,
+ tmp3 /* tmp */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
+ BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
+
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp1);
}
}
@@ -629,6 +619,27 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
+#ifdef COMPILER2
+void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath) {
+ Label done;
+
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, slowpath);
+
+ // Check if the reference is null, and if it is, take the fast path.
+ __ testptr(obj, obj);
+ __ jcc(Assembler::zero, done);
+
+ Address gc_state(r15_thread, ShenandoahThreadLocalData::gc_state_offset());
+
+ // Check if the heap is under weak-reference/roots processing, in
+ // which case we need to take the slow path.
+ __ testb(gc_state, ShenandoahHeap::WEAK_ROOTS);
+ __ jcc(Assembler::notZero, slowpath);
+ __ bind(done);
+}
+#endif // COMPILER2
+
// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
index b0185f2dbff..79540aa19e1 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -41,21 +42,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call);
-
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,
@@ -84,6 +78,9 @@ public:
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
+#ifdef COMPILER2
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath);
+#endif // COMPILER2
};
#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
index ae93cca8c19..47a3dad54e7 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1328,6 +1328,19 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ jmp(slow_continuation);
}
+void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slow_path) {
+ // Resolve weak handle using the standard implementation.
+ BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, slow_path);
+
+ // Check if the oop is bad, in which case we need to take the slow path.
+ __ testptr(obj, Address(r15_thread, ZThreadLocalData::mark_bad_mask_offset()));
+ __ jcc(Assembler::notZero, slow_path);
+
+ // Oop is okay, so we uncolor it.
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
+ __ shrq(obj, barrier_Relocation::unpatched);
+}
+
#undef __
#endif // COMPILER2
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
index 19902500f93..e91e2b9ea20 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -167,6 +167,8 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
+
+ virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slow_path);
#endif // COMPILER2
void store_barrier_fast(MacroAssembler* masm,
diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp
index 103e22d0185..4f5b6d31e75 100644
--- a/src/hotspot/cpu/x86/globals_x86.hpp
+++ b/src/hotspot/cpu/x86/globals_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,9 +46,9 @@ define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRES
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
-define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(uint, CodeEntryAlignment, 32);
#else
-define_pd_global(intx, CodeEntryAlignment, 16);
+define_pd_global(uint, CodeEntryAlignment, 16);
#endif // COMPILER2_OR_JVMCI
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index be7deb884ce..b54f6adc263 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -765,7 +765,7 @@ void MacroAssembler::align32() {
void MacroAssembler::align(uint modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
- assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
+ assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
align(modulus, offset());
}
@@ -2656,6 +2656,17 @@ void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscra
}
}
+void MacroAssembler::vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
+ assert(rscratch != noreg || always_reachable(src), "missing");
+
+ if (reachable(src)) {
+ Assembler::vucomxsd(dst, as_Address(src));
+ } else {
+ lea(rscratch, src);
+ Assembler::vucomxsd(dst, Address(rscratch, 0));
+ }
+}
+
void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
@@ -2667,6 +2678,17 @@ void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscra
}
}
+void MacroAssembler::vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+ assert(rscratch != noreg || always_reachable(src), "missing");
+
+ if (reachable(src)) {
+ Assembler::vucomxss(dst, as_Address(src));
+ } else {
+ lea(rscratch, src);
+ Assembler::vucomxss(dst, Address(rscratch, 0));
+ }
+}
+
void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
@@ -6086,7 +6108,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
subptr(count, 16 << shift);
- jccb(Assembler::less, L_check_fill_32_bytes);
+ jcc(Assembler::less, L_check_fill_32_bytes);
align(16);
BIND(L_fill_64_bytes_loop_avx3);
@@ -6251,32 +6273,46 @@ void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src,
}
}
-// encode char[] to byte[] in ISO_8859_1 or ASCII
- //@IntrinsicCandidate
- //private static int implEncodeISOArray(byte[] sa, int sp,
- //byte[] da, int dp, int len) {
- // int i = 0;
- // for (; i < len; i++) {
- // char c = StringUTF16.getChar(sa, sp++);
- // if (c > '\u00FF')
- // break;
- // da[dp++] = (byte)c;
- // }
- // return i;
- //}
- //
- //@IntrinsicCandidate
- //private static int implEncodeAsciiArray(char[] sa, int sp,
- // byte[] da, int dp, int len) {
- // int i = 0;
- // for (; i < len; i++) {
- // char c = sa[sp++];
- // if (c >= '\u0080')
- // break;
- // da[dp++] = (byte)c;
- // }
- // return i;
- //}
+// Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
+//
+// @IntrinsicCandidate
+// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
+// char[] sa, int sp, byte[] da, int dp, int len) {
+// int i = 0;
+// for (; i < len; i++) {
+// char c = sa[sp++];
+// if (c > '\u00FF')
+// break;
+// da[dp++] = (byte) c;
+// }
+// return i;
+// }
+//
+// @IntrinsicCandidate
+// int java.lang.StringCoding.encodeISOArray0(
+// byte[] sa, int sp, byte[] da, int dp, int len) {
+// int i = 0;
+// for (; i < len; i++) {
+// char c = StringUTF16.getChar(sa, sp++);
+// if (c > '\u00FF')
+// break;
+// da[dp++] = (byte) c;
+// }
+// return i;
+// }
+//
+// @IntrinsicCandidate
+// int java.lang.StringCoding.encodeAsciiArray0(
+// char[] sa, int sp, byte[] da, int dp, int len) {
+// int i = 0;
+// for (; i < len; i++) {
+// char c = sa[sp++];
+// if (c >= '\u0080')
+// break;
+// da[dp++] = (byte) c;
+// }
+// return i;
+// }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,
@@ -9998,6 +10034,20 @@ void MacroAssembler::restore_legacy_gprs() {
addq(rsp, 16 * wordSize);
}
+void MacroAssembler::load_aotrc_address(Register reg, address a) {
+#if INCLUDE_CDS
+ assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
+ if (AOTCodeCache::is_on_for_dump()) {
+ // all aotrc field addresses should be registered in the AOTCodeCache address table
+ lea(reg, ExternalAddress(a));
+ } else {
+ mov64(reg, (uint64_t)a);
+ }
+#else
+ ShouldNotReachHere();
+#endif
+}
+
void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
if (VM_Version::supports_apx_f()) {
esetzucc(comparison, dst);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 93e3529ac1e..5c049f710e2 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1313,10 +1313,18 @@ public:
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
+ void vucomxss(XMMRegister dst, XMMRegister src) { Assembler::vucomxss(dst, src); }
+ void vucomxss(XMMRegister dst, Address src) { Assembler::vucomxss(dst, src); }
+ void vucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
+
void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
+ void vucomxsd(XMMRegister dst, XMMRegister src) { Assembler::vucomxsd(dst, src); }
+ void vucomxsd(XMMRegister dst, Address src) { Assembler::vucomxsd(dst, src); }
+ void vucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
+
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
@@ -2062,6 +2070,7 @@ public:
void save_legacy_gprs();
void restore_legacy_gprs();
+ void load_aotrc_address(Register reg, address a);
void setcc(Assembler::Condition comparison, Register dst);
};
diff --git a/src/hotspot/cpu/x86/register_x86.cpp b/src/hotspot/cpu/x86/register_x86.cpp
index e6083429344..188af9264a8 100644
--- a/src/hotspot/cpu/x86/register_x86.cpp
+++ b/src/hotspot/cpu/x86/register_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_
const char * Register::RegisterImpl::name() const {
static const char *const names[number_of_registers] = {
-#ifdef _LP64
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
-#else
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-#endif // _LP64
};
return is_valid() ? names[encoding()] : "noreg";
}
@@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
const char* XMMRegister::XMMRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-#ifdef _LP64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
-#endif // _LP64
};
return is_valid() ? names[encoding()] : "xnoreg";
}
diff --git a/src/hotspot/cpu/x86/register_x86.hpp b/src/hotspot/cpu/x86/register_x86.hpp
index 0a8ecb7be26..5e0326b9aad 100644
--- a/src/hotspot/cpu/x86/register_x86.hpp
+++ b/src/hotspot/cpu/x86/register_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
-// The implementation of integer registers for the x86/x64 architectures.
+// The implementation of integer registers for the x64 architectures.
class Register {
private:
int _encoding;
@@ -44,11 +44,9 @@ private:
public:
inline friend constexpr Register as_Register(int encoding);
- enum {
- number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
- number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
- max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
- };
+ static const int number_of_registers = 32;
+ static const int number_of_byte_registers = 32;
+ static const int max_slots_per_register = 2;
class RegisterImpl: public AbstractRegisterImpl {
friend class Register;
@@ -79,11 +77,9 @@ public:
// Actually available GP registers for use, depending on actual CPU capabilities and flags.
static int available_gp_registers() {
-#ifdef _LP64
if (!UseAPX) {
return number_of_registers / 2;
}
-#endif // _LP64
return number_of_registers;
}
};
@@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
constexpr Register rbp = as_Register(5);
constexpr Register rsi = as_Register(6);
constexpr Register rdi = as_Register(7);
-#ifdef _LP64
-constexpr Register r8 = as_Register( 8);
-constexpr Register r9 = as_Register( 9);
+constexpr Register r8 = as_Register(8);
+constexpr Register r9 = as_Register(9);
constexpr Register r10 = as_Register(10);
constexpr Register r11 = as_Register(11);
constexpr Register r12 = as_Register(12);
@@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
constexpr Register r29 = as_Register(29);
constexpr Register r30 = as_Register(30);
constexpr Register r31 = as_Register(31);
-#endif // _LP64
// The implementation of x87 floating point registers for the ia32 architecture.
@@ -154,10 +148,8 @@ private:
public:
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
- enum {
- number_of_registers = 8,
- max_slots_per_register = 2
- };
+ static const int number_of_registers = 8;
+ static const int max_slots_per_register = 2;
class FloatRegisterImpl: public AbstractRegisterImpl {
friend class FloatRegister;
@@ -217,10 +209,8 @@ private:
public:
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
- enum {
- number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
- max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
- };
+ static const int number_of_registers = 32;
+ static const int max_slots_per_register = 16; // 512-bit
class XMMRegisterImpl: public AbstractRegisterImpl {
friend class XMMRegister;
@@ -250,11 +240,9 @@ public:
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {
-#ifdef _LP64
if (UseAVX < 3) {
return number_of_registers / 2;
}
-#endif // _LP64
return number_of_registers;
}
};
@@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
-#ifdef _LP64
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
constexpr XMMRegister xmm10 = as_XMMRegister(10);
@@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
constexpr XMMRegister xmm29 = as_XMMRegister(29);
constexpr XMMRegister xmm30 = as_XMMRegister(30);
constexpr XMMRegister xmm31 = as_XMMRegister(31);
-#endif // _LP64
// The implementation of AVX-512 opmask registers.
@@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
- enum {
- max_gpr = Register::number_of_registers * Register::max_slots_per_register,
- max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
- max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
- max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
+ static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
+ static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
+ static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
+ static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
-
- // x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
- // REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
- // with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
- // added for 32 bit jvm.
- number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
- NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
- 1 // eflags
- };
+ static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
+ 1; // eflags
};
template <>
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 5a4a5b1809e..bbd43c1a0e8 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
- Register method = rbx;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
+ Register method = rbx;
- { // Bypass the barrier for non-static methods
- Register flags = rscratch1;
- __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
- __ testl(flags, JVM_ACC_STATIC);
- __ jcc(Assembler::zero, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ Register flags = rscratch1;
+ __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
+ __ testl(flags, JVM_ACC_STATIC);
+ __ jcc(Assembler::zero, L_skip_barrier); // non-static
- Register klass = rscratch1;
- __ load_method_holder(klass, method);
- __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
+ Register klass = rscratch1;
+ __ load_method_holder(klass, method);
+ __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
- __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR
-
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
index 3e5593322d5..7d5dee6a5df 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+// 0 - 63
+ 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+ 45, 46, 46, 47
+ };
+
+static address kyberAvx512_12To16DupAddr() {
+ return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+// 0 - 31
+ 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+ 4, 0, 4, 0, 4, 0, 4
+ };
+
+static address kyberAvx512_12To16ShiftAddr() {
+ return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+// 0 - 7
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+ };
+
+static address kyberAvx512_12To16AndAddr() {
+ return (address) kyberAvx512_12To16And;
+}
+
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
- Label Loop;
+ Label Loop, VBMILoop;
__ addptr(condensed, condensedOffs);
+ if (VM_Version::supports_avx512_vbmi()) {
+ // mask load for the first 48 bytes of each vector
+ __ mov64(rax, 0x0000FFFFFFFFFFFF);
+ __ kmovql(k1, rax);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+ __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+ __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+ __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(VBMILoop);
+
+ __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+ Assembler::AVX_512bit);
+
+ __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+ __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+ __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+ __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+ __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+ __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+ __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+ __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+ __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+ store4regs(parsed, 0, xmm0_3, _masm);
+
+ __ addptr(condensed, 192);
+ __ addptr(parsed, 256);
+ __ subl(parsedLength, 128);
+ __ jcc(Assembler::greater, VBMILoop);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ mov64(rax, 0); // return 0
+ __ ret(0);
+
+ return start;
+ }
+
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 42392b84833..db7749ec482 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;
@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 747daefd51d..78d6dec08cf 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,7 @@ int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
-#define DECLARE_CPU_FEATURE_NAME(id, name, bit) name,
+#define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
#undef DECLARE_CPU_FEATURE_NAME
@@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
- Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
+ Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
+ //
+ // Query CPUID 0xD.19 for APX XSAVE offset
+ // Extended State Enumeration Sub-leaf 19 (APX)
+ // EAX = size of APX state (should be 128)
+ // EBX = offset in standard XSAVE format
+ //
+ __ movl(rax, 0xD);
+ __ movl(rcx, 19);
+ __ cpuid();
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
+ __ movl(Address(rsi, 0), rax);
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
+ __ movl(Address(rsi, 0), rbx);
+
UseAPX = save_apx;
__ bind(vector_save_restore);
//
@@ -921,8 +935,9 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
- (_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
- _model == 0xCC || _model == 0xDD)) {
+ (supports_hybrid() ||
+ _model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
+ _model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}
@@ -1137,6 +1152,10 @@ void VM_Version::get_processor_features() {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
+ warning("AES_CTR intrinsics require UseAES flag to be enabled. AES_CTR intrinsics will be disabled.");
+ }
+ FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (UseSSE > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
@@ -1155,8 +1174,8 @@ void VM_Version::get_processor_features() {
if (!UseAESIntrinsics) {
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
- FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
+ FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (supports_sse4_1()) {
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
@@ -1176,16 +1195,16 @@ void VM_Version::get_processor_features() {
} else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
- FLAG_SET_DEFAULT(UseAES, false);
}
+ FLAG_SET_DEFAULT(UseAES, false);
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
- FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics are not available on this CPU");
- FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
+ FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
// Use CLMUL instructions if available.
@@ -1340,16 +1359,16 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
- if (supports_evex() && supports_avx512bw()) {
- if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
- UseSHA3Intrinsics = true;
- }
+ if (UseSHA && supports_evex() && supports_avx512bw()) {
+ if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
+ FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
+ }
} else if (UseSHA3Intrinsics) {
- warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
- FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
+ warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
+ FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
}
- if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
+ if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}
@@ -1640,41 +1659,40 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
-#ifdef COMPILER2
- if (UseAVX > 2) {
- if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
- (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
- ArrayOperationPartialInlineSize != 0 &&
- ArrayOperationPartialInlineSize != 16 &&
- ArrayOperationPartialInlineSize != 32 &&
- ArrayOperationPartialInlineSize != 64)) {
- int inline_size = 0;
- if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
- inline_size = 64;
- } else if (MaxVectorSize >= 32) {
- inline_size = 32;
- } else if (MaxVectorSize >= 16) {
- inline_size = 16;
- }
- if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
- warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
- }
- ArrayOperationPartialInlineSize = inline_size;
- }
-
- if (ArrayOperationPartialInlineSize > MaxVectorSize) {
- ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
- if (ArrayOperationPartialInlineSize) {
- warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
- } else {
- warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
- }
- }
- }
-#endif
}
#ifdef COMPILER2
+ if (UseAVX > 2) {
+ if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
+ (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
+ ArrayOperationPartialInlineSize != 0 &&
+ ArrayOperationPartialInlineSize != 16 &&
+ ArrayOperationPartialInlineSize != 32 &&
+ ArrayOperationPartialInlineSize != 64)) {
+ int inline_size = 0;
+ if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
+ inline_size = 64;
+ } else if (MaxVectorSize >= 32) {
+ inline_size = 32;
+ } else if (MaxVectorSize >= 16) {
+ inline_size = 16;
+ }
+ if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
+ warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
+ }
+ ArrayOperationPartialInlineSize = inline_size;
+ }
+
+ if (ArrayOperationPartialInlineSize > MaxVectorSize) {
+ ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
+ if (ArrayOperationPartialInlineSize) {
+ warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
+ } else {
+ warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
+ }
+ }
+ }
+
if (FLAG_IS_DEFAULT(OptimizeFill)) {
if (MaxVectorSize < 32 || (!EnableX86ECoreOpts && !VM_Version::supports_avx512vlbw())) {
OptimizeFill = false;
@@ -3279,12 +3297,50 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
int i = 0;
ss.join([&]() {
- while (i < MAX_CPU_FEATURES) {
- if (_features.supports_feature((VM_Version::Feature_Flag)i)) {
- return _features_names[i++];
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ if (features.supports_feature((VM_Version::Feature_Flag)i)) {
+ str = _features_names[i];
}
i += 1;
}
- return (const char*)nullptr;
+ return str;
}, ", ");
}
+
+void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
+ VM_Features* features = (VM_Features*)features_buffer;
+ insert_features_names(*features, ss);
+}
+
+void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
+ VM_Features* vm_features_set1 = (VM_Features*)features_set1;
+ VM_Features* vm_features_set2 = (VM_Features*)features_set2;
+ int i = 0;
+ ss.join([&]() {
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ Feature_Flag flag = (Feature_Flag)i;
+ if (vm_features_set1->supports_feature(flag) && !vm_features_set2->supports_feature(flag)) {
+ str = _features_names[i];
+ }
+ i += 1;
+ }
+ return str;
+ }, ", ");
+}
+
+int VM_Version::cpu_features_size() {
+ return sizeof(VM_Features);
+}
+
+void VM_Version::store_cpu_features(void* buf) {
+ VM_Features copy = _features;
+ copy.clear_feature(CPU_HT); // HT does not result in incompatibility of aot code cache
+ memcpy(buf, ©, sizeof(VM_Features));
+}
+
+bool VM_Version::supports_features(void* features_buffer) {
+ VM_Features* features_to_test = (VM_Features*)features_buffer;
+ return _features.supports_features(features_to_test);
+}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index cc93ee3564e..e0a895737b7 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -377,84 +377,84 @@ protected:
*/
enum Feature_Flag {
#define CPU_FEATURE_FLAGS(decl) \
- decl(CX8, "cx8", 0) /* next bits are from cpuid 1 (EDX) */ \
- decl(CMOV, "cmov", 1) \
- decl(FXSR, "fxsr", 2) \
- decl(HT, "ht", 3) \
+ decl(CX8, cx8, 0) /* next bits are from cpuid 1 (EDX) */ \
+ decl(CMOV, cmov, 1) \
+ decl(FXSR, fxsr, 2) \
+ decl(HT, ht, 3) \
\
- decl(MMX, "mmx", 4) \
- decl(3DNOW_PREFETCH, "3dnowpref", 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
+ decl(MMX, mmx, 4) \
+ decl(3DNOW_PREFETCH, 3dnowpref, 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
- decl(SSE, "sse", 6) \
- decl(SSE2, "sse2", 7) \
+ decl(SSE, sse, 6) \
+ decl(SSE2, sse2, 7) \
\
- decl(SSE3, "sse3", 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
- decl(SSSE3, "ssse3", 9 ) \
- decl(SSE4A, "sse4a", 10) \
- decl(SSE4_1, "sse4.1", 11) \
+ decl(SSE3, sse3, 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
+ decl(SSSE3, ssse3, 9 ) \
+ decl(SSE4A, sse4a, 10) \
+ decl(SSE4_1, sse4.1, 11) \
\
- decl(SSE4_2, "sse4.2", 12) \
- decl(POPCNT, "popcnt", 13) \
- decl(LZCNT, "lzcnt", 14) \
- decl(TSC, "tsc", 15) \
+ decl(SSE4_2, sse4.2, 12) \
+ decl(POPCNT, popcnt, 13) \
+ decl(LZCNT, lzcnt, 14) \
+ decl(TSC, tsc, 15) \
\
- decl(TSCINV_BIT, "tscinvbit", 16) \
- decl(TSCINV, "tscinv", 17) \
- decl(AVX, "avx", 18) \
- decl(AVX2, "avx2", 19) \
+ decl(TSCINV_BIT, tscinvbit, 16) \
+ decl(TSCINV, tscinv, 17) \
+ decl(AVX, avx, 18) \
+ decl(AVX2, avx2, 19) \
\
- decl(AES, "aes", 20) \
- decl(ERMS, "erms", 21) /* enhanced 'rep movsb/stosb' instructions */ \
- decl(CLMUL, "clmul", 22) /* carryless multiply for CRC */ \
- decl(BMI1, "bmi1", 23) \
+ decl(AES, aes, 20) \
+ decl(ERMS, erms, 21) /* enhanced 'rep movsb/stosb' instructions */ \
+ decl(CLMUL, clmul, 22) /* carryless multiply for CRC */ \
+ decl(BMI1, bmi1, 23) \
\
- decl(BMI2, "bmi2", 24) \
- decl(RTM, "rtm", 25) /* Restricted Transactional Memory instructions */ \
- decl(ADX, "adx", 26) \
- decl(AVX512F, "avx512f", 27) /* AVX 512bit foundation instructions */ \
+ decl(BMI2, bmi2, 24) \
+ decl(RTM, rtm, 25) /* Restricted Transactional Memory instructions */ \
+ decl(ADX, adx, 26) \
+ decl(AVX512F, avx512f, 27) /* AVX 512bit foundation instructions */ \
\
- decl(AVX512DQ, "avx512dq", 28) \
- decl(AVX512PF, "avx512pf", 29) \
- decl(AVX512ER, "avx512er", 30) \
- decl(AVX512CD, "avx512cd", 31) \
+ decl(AVX512DQ, avx512dq, 28) \
+ decl(AVX512PF, avx512pf, 29) \
+ decl(AVX512ER, avx512er, 30) \
+ decl(AVX512CD, avx512cd, 31) \
\
- decl(AVX512BW, "avx512bw", 32) /* Byte and word vector instructions */ \
- decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \
- decl(SHA, "sha", 34) /* SHA instructions */ \
- decl(FMA, "fma", 35) /* FMA instructions */ \
+ decl(AVX512BW, avx512bw, 32) /* Byte and word vector instructions */ \
+ decl(AVX512VL, avx512vl, 33) /* EVEX instructions with smaller vector length */ \
+ decl(SHA, sha, 34) /* SHA instructions */ \
+ decl(FMA, fma, 35) /* FMA instructions */ \
\
- decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \
- decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \
- decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
- decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \
+ decl(VZEROUPPER, vzeroupper, 36) /* Vzeroupper instruction */ \
+ decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq, 37) /* Vector popcount */ \
+ decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq, 38) /* Vector carryless multiplication */ \
+ decl(AVX512_VAES, avx512_vaes, 39) /* Vector AES instruction */ \
\
- decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \
- decl(FLUSH, "clflush", 41) /* flush instruction */ \
- decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \
- decl(CLWB, "clwb", 43) /* clwb instruction */ \
+ decl(AVX512_VNNI, avx512_vnni, 40) /* Vector Neural Network Instructions */ \
+ decl(FLUSH, clflush, 41) /* flush instruction */ \
+  decl(FLUSHOPT, clflushopt, 42) /* clflushopt instruction */ \
+ decl(CLWB, clwb, 43) /* clwb instruction */ \
\
- decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \
- decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \
- decl(HV, "hv", 46) /* Hypervisor instructions */ \
- decl(SERIALIZE, "serialize", 47) /* CPU SERIALIZE */ \
- decl(RDTSCP, "rdtscp", 48) /* RDTSCP instruction */ \
- decl(RDPID, "rdpid", 49) /* RDPID instruction */ \
- decl(FSRM, "fsrm", 50) /* Fast Short REP MOV */ \
- decl(GFNI, "gfni", 51) /* Vector GFNI instructions */ \
- decl(AVX512_BITALG, "avx512_bitalg", 52) /* Vector sub-word popcount and bit gather instructions */\
- decl(F16C, "f16c", 53) /* Half-precision and single precision FP conversion instructions*/ \
- decl(PKU, "pku", 54) /* Protection keys for user-mode pages */ \
- decl(OSPKE, "ospke", 55) /* OS enables protection keys */ \
- decl(CET_IBT, "cet_ibt", 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
- decl(CET_SS, "cet_ss", 57) /* Control Flow Enforcement - Shadow Stack */ \
- decl(AVX512_IFMA, "avx512_ifma", 58) /* Integer Vector FMA instructions*/ \
- decl(AVX_IFMA, "avx_ifma", 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
- decl(APX_F, "apx_f", 60) /* Intel Advanced Performance Extensions*/ \
- decl(SHA512, "sha512", 61) /* SHA512 instructions*/ \
- decl(AVX512_FP16, "avx512_fp16", 62) /* AVX512 FP16 ISA support*/ \
- decl(AVX10_1, "avx10_1", 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
- decl(AVX10_2, "avx10_2", 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
- decl(HYBRID, "hybrid", 65) /* Hybrid architecture */
+ decl(AVX512_VBMI2, avx512_vbmi2, 44) /* VBMI2 shift left double instructions */ \
+ decl(AVX512_VBMI, avx512_vbmi, 45) /* Vector BMI instructions */ \
+ decl(HV, hv, 46) /* Hypervisor instructions */ \
+ decl(SERIALIZE, serialize, 47) /* CPU SERIALIZE */ \
+ decl(RDTSCP, rdtscp, 48) /* RDTSCP instruction */ \
+ decl(RDPID, rdpid, 49) /* RDPID instruction */ \
+ decl(FSRM, fsrm, 50) /* Fast Short REP MOV */ \
+ decl(GFNI, gfni, 51) /* Vector GFNI instructions */ \
+ decl(AVX512_BITALG, avx512_bitalg, 52) /* Vector sub-word popcount and bit gather instructions */\
+ decl(F16C, f16c, 53) /* Half-precision and single precision FP conversion instructions*/ \
+ decl(PKU, pku, 54) /* Protection keys for user-mode pages */ \
+ decl(OSPKE, ospke, 55) /* OS enables protection keys */ \
+ decl(CET_IBT, cet_ibt, 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
+ decl(CET_SS, cet_ss, 57) /* Control Flow Enforcement - Shadow Stack */ \
+ decl(AVX512_IFMA, avx512_ifma, 58) /* Integer Vector FMA instructions*/ \
+ decl(AVX_IFMA, avx_ifma, 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
+ decl(APX_F, apx_f, 60) /* Intel Advanced Performance Extensions*/ \
+ decl(SHA512, sha512, 61) /* SHA512 instructions*/ \
+ decl(AVX512_FP16, avx512_fp16, 62) /* AVX512 FP16 ISA support*/ \
+ decl(AVX10_1, avx10_1, 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
+ decl(AVX10_2, avx10_2, 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
+ decl(HYBRID, hybrid, 65) /* Hybrid architecture */
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (bit),
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
@@ -516,6 +516,15 @@ protected:
int idx = index(feature);
return (_features_bitmap[idx] & bit_mask(feature)) != 0;
}
+
+ bool supports_features(VM_Features* features_to_test) {
+ for (int i = 0; i < features_bitmap_element_count(); i++) {
+ if ((_features_bitmap[i] & features_to_test->_features_bitmap[i]) != features_to_test->_features_bitmap[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
};
// CPU feature flags vector, can be affected by VM settings.
@@ -676,6 +685,10 @@ protected:
// Space to save apx registers after signal handle
jlong apx_save[2]; // Save r16 and r31
+ // cpuid function 0xD, subleaf 19 (APX extended state)
+ uint32_t apx_xstate_size; // EAX: size of APX state (128)
+ uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
+
VM_Features feature_flags() const;
// Asserts
@@ -739,6 +752,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
+ static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
+ static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
+
+ static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
+ static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check ymm register after signal handle
static int ymm_test_value() { return 0xCAFEBABE; }
@@ -1094,6 +1112,20 @@ public:
static bool supports_tscinv_ext(void);
static void initialize_cpu_information(void);
+
+ static void get_cpu_features_name(void* features_buffer, stringStream& ss);
+
+ // Returns names of features present in features_set1 but not in features_set2
+ static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
+
+ // Returns number of bytes required to store cpu features representation
+ static int cpu_features_size();
+
+ // Stores cpu features representation in the provided buffer. This representation is arch dependent.
+ // Size of the buffer must be same as returned by cpu_features_size()
+ static void store_cpu_features(void* buf);
+
+ static bool supports_features(void* features_to_test);
};
#endif // CPU_X86_VM_VERSION_X86_HPP
diff --git a/src/hotspot/cpu/x86/vmreg_x86.cpp b/src/hotspot/cpu/x86/vmreg_x86.cpp
index 44aee56ef15..45269e69fcb 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.cpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
regName[i++] = reg->name();
-#ifdef AMD64
regName[i++] = reg->name();
-#endif // AMD64
reg = reg->successor();
}
diff --git a/src/hotspot/cpu/x86/vmreg_x86.hpp b/src/hotspot/cpu/x86/vmreg_x86.hpp
index 6f7c7fafb32..032ce654f2f 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.hpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,14 +52,8 @@ inline bool is_KRegister() {
}
inline Register as_Register() {
-
- assert( is_Register(), "must be");
- // Yuk
-#ifdef AMD64
+ assert(is_Register(), "must be");
return ::as_Register(value() >> 1);
-#else
- return ::as_Register(value());
-#endif // AMD64
}
inline FloatRegister as_FloatRegister() {
@@ -82,9 +76,6 @@ inline KRegister as_KRegister() {
inline bool is_concrete() {
assert(is_reg(), "must be");
-#ifndef AMD64
- if (is_Register()) return true;
-#endif // AMD64
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
int base = value() - ConcreteRegisterImpl::max_fpr;
diff --git a/src/hotspot/cpu/x86/vmreg_x86.inline.hpp b/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
index 1aeedc094fd..76d70900fe4 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define CPU_X86_VMREG_X86_INLINE_HPP
inline VMReg Register::RegisterImpl::as_VMReg() const {
- return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
+ return VMRegImpl::as_VMReg(encoding() << 1);
}
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 93b306c37d6..0ffa4c2031c 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1699,9 +1699,10 @@ static void emit_cmpfp_fixup(MacroAssembler* masm) {
}
static void emit_cmpfp3(MacroAssembler* masm, Register dst) {
+  // For floating point comparisons, an unordered result (at least one input
+  // is NaN) sets CF=1, so the 'below' jump also covers the unordered case
Label done;
__ movl(dst, -1);
- __ jcc(Assembler::parity, done);
__ jcc(Assembler::below, done);
__ setcc(Assembler::notEqual, dst);
__ bind(done);
@@ -5186,6 +5187,18 @@ operand immL_65535()
interface(CONST_INTER);
%}
+// AOT Runtime Constants Address
+operand immAOTRuntimeConstantsAddress()
+%{
+ // Check if the address is in the range of AOT Runtime Constants
+ predicate(AOTRuntimeConstants::contains((address)(n->get_ptr())));
+ match(ConP);
+
+ op_cost(0);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
operand kReg()
%{
constraint(ALLOC_IN_RC(vectmask_reg));
@@ -5529,12 +5542,21 @@ operand rFlagsRegU()
operand rFlagsRegUCF() %{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
- predicate(false);
+ predicate(!UseAPX || !VM_Version::supports_avx10_2());
format %{ "RFLAGS_U_CF" %}
interface(REG_INTER);
%}
+operand rFlagsRegUCFE() %{
+ constraint(ALLOC_IN_RC(int_flags));
+ match(RegFlags);
+ predicate(UseAPX && VM_Version::supports_avx10_2());
+
+ format %{ "RFLAGS_U_CFE" %}
+ interface(REG_INTER);
+%}
+
// Float register operands
operand regF() %{
constraint(ALLOC_IN_RC(float_reg));
@@ -6027,10 +6049,10 @@ operand cmpOp()
interface(COND_INTER) %{
equal(0x4, "e");
not_equal(0x5, "ne");
- less(0xC, "l");
- greater_equal(0xD, "ge");
- less_equal(0xE, "le");
- greater(0xF, "g");
+ less(0xc, "l");
+ greater_equal(0xd, "ge");
+ less_equal(0xe, "le");
+ greater(0xf, "g");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
@@ -6062,11 +6084,12 @@ operand cmpOpU()
// don't need to use cmpOpUCF2 for eq/ne
operand cmpOpUCF() %{
match(Bool);
- predicate(n->as_Bool()->_test._test == BoolTest::lt ||
- n->as_Bool()->_test._test == BoolTest::ge ||
- n->as_Bool()->_test._test == BoolTest::le ||
- n->as_Bool()->_test._test == BoolTest::gt ||
- n->in(1)->in(1) == n->in(1)->in(2));
+ predicate((!UseAPX || !VM_Version::supports_avx10_2()) &&
+ (n->as_Bool()->_test._test == BoolTest::lt ||
+ n->as_Bool()->_test._test == BoolTest::ge ||
+ n->as_Bool()->_test._test == BoolTest::le ||
+ n->as_Bool()->_test._test == BoolTest::gt ||
+ n->in(1)->in(1) == n->in(1)->in(2)));
format %{ "" %}
interface(COND_INTER) %{
equal(0xb, "np");
@@ -6084,7 +6107,8 @@ operand cmpOpUCF() %{
// Floating comparisons that can be fixed up with extra conditional jumps
operand cmpOpUCF2() %{
match(Bool);
- predicate((n->as_Bool()->_test._test == BoolTest::ne ||
+ predicate((!UseAPX || !VM_Version::supports_avx10_2()) &&
+ (n->as_Bool()->_test._test == BoolTest::ne ||
n->as_Bool()->_test._test == BoolTest::eq) &&
n->in(1)->in(1) != n->in(1)->in(2));
format %{ "" %}
@@ -6100,6 +6124,37 @@ operand cmpOpUCF2() %{
%}
%}
+
+// Floating point comparisons that set condition flags to test more directly,
+// Unsigned tests are used for G (>) and GE (>=) conditions while signed tests
+// are used for L (<) and LE (<=) conditions. It's important to convert these
+// latter conditions to ones that use unsigned tests before passing into an
+// instruction because the preceding comparison might be based on a three way
+// comparison (CmpF3 or CmpD3) that also assigns unordered outcomes to -1.
+operand cmpOpUCFE()
+%{
+ match(Bool);
+ predicate((UseAPX && VM_Version::supports_avx10_2()) &&
+ (n->as_Bool()->_test._test == BoolTest::ne ||
+ n->as_Bool()->_test._test == BoolTest::eq ||
+ n->as_Bool()->_test._test == BoolTest::lt ||
+ n->as_Bool()->_test._test == BoolTest::ge ||
+ n->as_Bool()->_test._test == BoolTest::le ||
+ n->as_Bool()->_test._test == BoolTest::gt));
+
+ format %{ "" %}
+ interface(COND_INTER) %{
+ equal(0x4, "e");
+ not_equal(0x5, "ne");
+ less(0x2, "b");
+ greater_equal(0x3, "ae");
+ less_equal(0x6, "be");
+ greater(0x7, "a");
+ overflow(0x0, "o");
+ no_overflow(0x1, "no");
+ %}
+%}
+
// Operands for bound floating pointer register arguments
operand rxmm0() %{
constraint(ALLOC_IN_RC(xmm0_reg));
@@ -7289,6 +7344,19 @@ instruct loadD(regD dst, memory mem)
ins_pipe(pipe_slow); // XXX
%}
+instruct loadAOTRCAddress(rRegP dst, immAOTRuntimeConstantsAddress con)
+%{
+ match(Set dst con);
+
+ format %{ "leaq $dst, $con\t# AOT Runtime Constants Address" %}
+
+ ins_encode %{
+ __ load_aotrc_address($dst$$Register, (address)$con$$constant);
+ %}
+
+ ins_pipe(ialu_reg_fat);
+%}
+
// max = java.lang.Math.max(float a, float b)
instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());
@@ -9116,20 +9184,34 @@ instruct cmovI_imm_01UCF(rRegI dst, immI_1 src, rFlagsRegUCF cr, cmpOpUCF cop)
ins_pipe(ialu_reg);
%}
+instruct cmovI_imm_01UCFE(rRegI dst, immI_1 src, rFlagsRegUCFE cr, cmpOpUCFE cop)
+%{
+ predicate(n->in(2)->in(2)->is_Con() && n->in(2)->in(2)->get_int() == 0);
+ match(Set dst (CMoveI (Binary cop cr) (Binary src dst)));
+
+ ins_cost(100); // XXX
+ format %{ "setbn$cop $dst\t# signed, unsigned, int" %}
+ ins_encode %{
+ Assembler::Condition cond = (Assembler::Condition)($cop$$cmpcode);
+ __ setb(MacroAssembler::negate_condition(cond), $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
- predicate(!UseAPX);
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovI_regU(cop, cr, dst, src);
%}
%}
-instruct cmovI_regUCF_ndd(rRegI dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegI src1, rRegI src2) %{
- predicate(UseAPX);
+instruct cmovI_regUCFE_ndd(rRegI dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegI src1, rRegI src2) %{
match(Set dst (CMoveI (Binary cop cr) (Binary src1 src2)));
+
ins_cost(200);
- format %{ "ecmovl$cop $dst, $src1, $src2\t# unsigned, int ndd" %}
+ format %{ "ecmovl$cop $dst, $src1, $src2\t# signed, unsigned, int ndd" %}
ins_encode %{
__ ecmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Register);
%}
@@ -9137,7 +9219,7 @@ instruct cmovI_regUCF_ndd(rRegI dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegI src1,
%}
instruct cmovI_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
ins_cost(200); // XXX
@@ -9150,25 +9232,10 @@ instruct cmovI_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src)
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovI_regUCF2_ne_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src1, rRegI src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
- match(Set dst (CMoveI (Binary cop cr) (Binary src1 src2)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpl $dst, $src1, $src2\n\t"
- "cmovnel $dst, $src2" %}
- ins_encode %{
- __ ecmovl(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovl(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
// Since (x == y) == !(x != y), we can flip the sense of the test by flipping the
// inputs of the CMove
instruct cmovI_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
match(Set dst (CMoveI (Binary cop cr) (Binary src dst)));
effect(TEMP dst);
@@ -9182,23 +9249,6 @@ instruct cmovI_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src)
ins_pipe(pipe_cmov_reg);
%}
-// We need this special handling for only eq / neq comparison since NaN == NaN is false,
-// and parity flag bit is set if any of the operand is a NaN.
-instruct cmovI_regUCF2_eq_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegI dst, rRegI src1, rRegI src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
- match(Set dst (CMoveI (Binary cop cr) (Binary src2 src1)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpl $dst, $src1, $src2\n\t"
- "cmovnel $dst, $src2" %}
- ins_encode %{
- __ ecmovl(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovl(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
// Conditional move
instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
predicate(!UseAPX);
@@ -9241,8 +9291,8 @@ instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
%}
instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
- predicate(!UseAPX);
match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
+
ins_cost(250);
expand %{
cmovI_memU(cop, cr, dst, src);
@@ -9262,12 +9312,12 @@ instruct cmovI_rReg_rReg_memU_ndd(rRegI dst, cmpOpU cop, rFlagsRegU cr, rRegI sr
ins_pipe(pipe_cmov_mem);
%}
-instruct cmovI_rReg_rReg_memUCF_ndd(rRegI dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegI src1, memory src2)
+instruct cmovI_rReg_rReg_memUCFE_ndd(rRegI dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegI src1, memory src2)
%{
- predicate(UseAPX);
match(Set dst (CMoveI (Binary cop cr) (Binary src1 (LoadI src2))));
+
ins_cost(250);
- format %{ "ecmovl$cop $dst, $src1, $src2\t# unsigned, int ndd" %}
+ format %{ "ecmovl$cop $dst, $src1, $src2\t# signed, unsigned, int ndd" %}
ins_encode %{
__ ecmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Address);
%}
@@ -9317,8 +9367,8 @@ instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
%}
instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
- predicate(!UseAPX);
match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovN_regU(cop, cr, dst, src);
@@ -9339,11 +9389,11 @@ instruct cmovN_regU_ndd(rRegN dst, cmpOpU cop, rFlagsRegU cr, rRegN src1, rRegN
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovN_regUCF_ndd(rRegN dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegN src1, rRegN src2) %{
- predicate(UseAPX);
+instruct cmovN_regUCFE_ndd(rRegN dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegN src1, rRegN src2) %{
match(Set dst (CMoveN (Binary cop cr) (Binary src1 src2)));
+
ins_cost(200);
- format %{ "ecmovl$cop $dst, $src1, $src2\t# unsigned, compressed ptr ndd" %}
+ format %{ "ecmovl$cop $dst, $src1, $src2\t# signed, unsigned, compressed ptr ndd" %}
ins_encode %{
__ ecmovl((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Register);
%}
@@ -9437,19 +9487,19 @@ instruct cmovP_regU_ndd(rRegP dst, cmpOpU cop, rFlagsRegU cr, rRegP src1, rRegP
%}
instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
- predicate(!UseAPX);
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovP_regU(cop, cr, dst, src);
%}
%}
-instruct cmovP_regUCF_ndd(rRegP dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegP src1, rRegP src2) %{
- predicate(UseAPX);
+instruct cmovP_regUCFE_ndd(rRegP dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegP src1, rRegP src2) %{
match(Set dst (CMoveP (Binary cop cr) (Binary src1 src2)));
+
ins_cost(200);
- format %{ "ecmovq$cop $dst, $src1, $src2\t# unsigned, ptr ndd" %}
+ format %{ "ecmovq$cop $dst, $src1, $src2\t# signed, unsigned, ptr ndd" %}
ins_encode %{
__ ecmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Register);
%}
@@ -9457,7 +9507,7 @@ instruct cmovP_regUCF_ndd(rRegP dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegP src1,
%}
instruct cmovP_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
ins_cost(200); // XXX
@@ -9470,25 +9520,10 @@ instruct cmovP_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src)
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovP_regUCF2_ne_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src1, rRegP src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
- match(Set dst (CMoveP (Binary cop cr) (Binary src1 src2)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpq $dst, $src1, $src2\n\t"
- "cmovneq $dst, $src2" %}
- ins_encode %{
- __ ecmovq(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovq(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
// Since (x == y) == !(x != y), we can flip the sense of the test by flipping the
// inputs of the CMove
instruct cmovP_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
match(Set dst (CMoveP (Binary cop cr) (Binary src dst)));
ins_cost(200); // XXX
@@ -9501,21 +9536,6 @@ instruct cmovP_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src)
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovP_regUCF2_eq_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegP dst, rRegP src1, rRegP src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
- match(Set dst (CMoveP (Binary cop cr) (Binary src2 src1)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpq $dst, $src1, $src2\n\t"
- "cmovneq $dst, $src2" %}
- ins_encode %{
- __ ecmovq(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovq(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
instruct cmovL_imm_01(rRegL dst, immL1 src, rFlagsReg cr, cmpOp cop)
%{
predicate(n->in(2)->in(2)->is_Con() && n->in(2)->in(2)->get_long() == 0);
@@ -9636,21 +9656,35 @@ instruct cmovL_imm_01UCF(rRegL dst, immL1 src, rFlagsRegUCF cr, cmpOpUCF cop)
ins_pipe(ialu_reg);
%}
+instruct cmovL_imm_01UCFE(rRegL dst, immL1 src, rFlagsRegUCFE cr, cmpOpUCFE cop)
+%{
+ predicate(n->in(2)->in(2)->is_Con() && n->in(2)->in(2)->get_long() == 0);
+ match(Set dst (CMoveL (Binary cop cr) (Binary src dst)));
+
+ ins_cost(100); // XXX
+ format %{ "setbn$cop $dst\t# signed, unsigned, long" %}
+ ins_encode %{
+ Assembler::Condition cond = (Assembler::Condition)($cop$$cmpcode);
+ __ setb(MacroAssembler::negate_condition(cond), $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
- predicate(!UseAPX);
match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovL_regU(cop, cr, dst, src);
%}
%}
-instruct cmovL_regUCF_ndd(rRegL dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegL src1, rRegL src2)
+instruct cmovL_regUCFE_ndd(rRegL dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegL src1, rRegL src2)
%{
- predicate(UseAPX);
match(Set dst (CMoveL (Binary cop cr) (Binary src1 src2)));
+
ins_cost(200);
- format %{ "ecmovq$cop $dst, $src1, $src2\t# unsigned, long ndd" %}
+ format %{ "ecmovq$cop $dst, $src1, $src2\t# signed, unsigned, long ndd" %}
ins_encode %{
__ ecmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Register);
%}
@@ -9658,7 +9692,7 @@ instruct cmovL_regUCF_ndd(rRegL dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegL src1,
%}
instruct cmovL_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
ins_cost(200); // XXX
@@ -9671,25 +9705,10 @@ instruct cmovL_regUCF2_ne(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src)
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovL_regUCF2_ne_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src1, rRegL src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::ne);
- match(Set dst (CMoveL (Binary cop cr) (Binary src1 src2)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpq $dst, $src1, $src2\n\t"
- "cmovneq $dst, $src2" %}
- ins_encode %{
- __ ecmovq(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovq(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
// Since (x == y) == !(x != y), we can flip the sense of the test by flipping the
// inputs of the CMove
instruct cmovL_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
- predicate(!UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
+ predicate(n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
match(Set dst (CMoveL (Binary cop cr) (Binary src dst)));
ins_cost(200); // XXX
@@ -9702,21 +9721,6 @@ instruct cmovL_regUCF2_eq(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src)
ins_pipe(pipe_cmov_reg);
%}
-instruct cmovL_regUCF2_eq_ndd(cmpOpUCF2 cop, rFlagsRegUCF cr, rRegL dst, rRegL src1, rRegL src2) %{
- predicate(UseAPX && n->in(1)->in(1)->as_Bool()->_test._test == BoolTest::eq);
- match(Set dst (CMoveL (Binary cop cr) (Binary src2 src1)));
- effect(TEMP dst);
-
- ins_cost(200);
- format %{ "ecmovpq $dst, $src1, $src2\n\t"
- "cmovneq $dst, $src2" %}
- ins_encode %{
- __ ecmovq(Assembler::parity, $dst$$Register, $src1$$Register, $src2$$Register);
- __ cmovq(Assembler::notEqual, $dst$$Register, $src2$$Register);
- %}
- ins_pipe(pipe_cmov_reg);
-%}
-
instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
%{
predicate(!UseAPX);
@@ -9731,8 +9735,8 @@ instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
%}
instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
- predicate(!UseAPX);
match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
+
ins_cost(200);
expand %{
cmovL_memU(cop, cr, dst, src);
@@ -9752,12 +9756,12 @@ instruct cmovL_rReg_rReg_memU_ndd(rRegL dst, cmpOpU cop, rFlagsRegU cr, rRegL sr
ins_pipe(pipe_cmov_mem);
%}
-instruct cmovL_rReg_rReg_memUCF_ndd(rRegL dst, cmpOpUCF cop, rFlagsRegUCF cr, rRegL src1, memory src2)
+instruct cmovL_rReg_rReg_memUCFE_ndd(rRegL dst, cmpOpUCFE cop, rFlagsRegUCFE cr, rRegL src1, memory src2)
%{
- predicate(UseAPX);
match(Set dst (CMoveL (Binary cop cr) (Binary src1 (LoadL src2))));
+
ins_cost(200);
- format %{ "ecmovq$cop $dst, $src1, $src2\t# unsigned, long ndd" %}
+ format %{ "ecmovq$cop $dst, $src1, $src2\t# signed, unsigned, long ndd" %}
ins_encode %{
__ ecmovq((Assembler::Condition)($cop$$cmpcode), $dst$$Register, $src1$$Register, $src2$$Address);
%}
@@ -9802,12 +9806,31 @@ instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovF_regU(cop, cr, dst, src);
%}
%}
+instruct cmovF_regUCFE(cmpOpUCFE cop, rFlagsRegUCFE cr, regF dst, regF src)
+%{
+ match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
+
+ ins_cost(200); // XXX
+ format %{ "jn$cop skip\t# signed, unsigned cmove float\n\t"
+ "movss $dst, $src\n"
+ "skip:" %}
+ ins_encode %{
+ Label Lskip;
+ // Invert sense of branch from sense of CMOV
+ __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
+ __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+ __ bind(Lskip);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
%{
match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
@@ -9846,12 +9869,31 @@ instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
+
ins_cost(200);
expand %{
cmovD_regU(cop, cr, dst, src);
%}
%}
+instruct cmovD_regUCFE(cmpOpUCFE cop, rFlagsRegUCFE cr, regD dst, regD src)
+%{
+ match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
+
+ ins_cost(200); // XXX
+ format %{ "jn$cop skip\t# signed, unsigned cmove double\n\t"
+ "movsd $dst, $src\n"
+ "skip:" %}
+ ins_encode %{
+ Label Lskip;
+ // Invert sense of branch from sense of CMOV
+ __ jccb((Assembler::Condition)($cop$$cmpcode^1), Lskip);
+ __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
+ __ bind(Lskip);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
@@ -14319,7 +14361,7 @@ instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
ins_pipe(pipe_slow);
%}
-instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
+instruct cmpF_cc_regCF(rFlagsRegUCF cr, regF src1, regF src2) %{
match(Set cr (CmpF src1 src2));
ins_cost(100);
@@ -14330,6 +14372,17 @@ instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
ins_pipe(pipe_slow);
%}
+instruct cmpF_cc_regCFE(rFlagsRegUCFE cr, regF src1, regF src2) %{
+ match(Set cr (CmpF src1 src2));
+
+ ins_cost(100);
+ format %{ "vucomxss $src1, $src2" %}
+ ins_encode %{
+ __ vucomxss($src1$$XMMRegister, $src2$$XMMRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
match(Set cr (CmpF src1 (LoadF src2)));
@@ -14341,8 +14394,20 @@ instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
ins_pipe(pipe_slow);
%}
+instruct cmpF_cc_memCFE(rFlagsRegUCFE cr, regF src1, memory src2) %{
+ match(Set cr (CmpF src1 (LoadF src2)));
+
+ ins_cost(100);
+ format %{ "vucomxss $src1, $src2" %}
+ ins_encode %{
+ __ vucomxss($src1$$XMMRegister, $src2$$Address);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
match(Set cr (CmpF src con));
+
ins_cost(100);
format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
@@ -14351,6 +14416,17 @@ instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
ins_pipe(pipe_slow);
%}
+instruct cmpF_cc_immCFE(rFlagsRegUCFE cr, regF src, immF con) %{
+ match(Set cr (CmpF src con));
+
+ ins_cost(100);
+ format %{ "vucomxss $src, [$constantaddress]\t# load from constant table: float=$con" %}
+ ins_encode %{
+ __ vucomxss($src$$XMMRegister, $constantaddress($con));
+ %}
+ ins_pipe(pipe_slow);
+%}
+
// Really expensive, avoid
instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
%{
@@ -14370,7 +14446,7 @@ instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
ins_pipe(pipe_slow);
%}
-instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
+instruct cmpD_cc_regCF(rFlagsRegUCF cr, regD src1, regD src2) %{
match(Set cr (CmpD src1 src2));
ins_cost(100);
@@ -14381,6 +14457,17 @@ instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
ins_pipe(pipe_slow);
%}
+instruct cmpD_cc_regCFE(rFlagsRegUCFE cr, regD src1, regD src2) %{
+ match(Set cr (CmpD src1 src2));
+
+ ins_cost(100);
+ format %{ "vucomxsd $src1, $src2 test" %}  <!-- NOTE(review): stray trailing " test" in this format string — leftover debug text; should read "vucomxsd $src1, $src2" to match cmpF_cc_regCFE / cmpD_cc_memCFE -->
+ ins_encode %{
+ __ vucomxsd($src1$$XMMRegister, $src2$$XMMRegister);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
match(Set cr (CmpD src1 (LoadD src2)));
@@ -14392,6 +14479,17 @@ instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
ins_pipe(pipe_slow);
%}
+instruct cmpD_cc_memCFE(rFlagsRegUCFE cr, regD src1, memory src2) %{
+ match(Set cr (CmpD src1 (LoadD src2)));
+
+ ins_cost(100);
+ format %{ "vucomxsd $src1, $src2" %}
+ ins_encode %{
+ __ vucomxsd($src1$$XMMRegister, $src2$$Address);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
match(Set cr (CmpD src con));
ins_cost(100);
@@ -14402,6 +14500,17 @@ instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
ins_pipe(pipe_slow);
%}
+instruct cmpD_cc_immCFE(rFlagsRegUCFE cr, regD src, immD con) %{
+ match(Set cr (CmpD src con));
+
+ ins_cost(100);
+ format %{ "vucomxsd $src, [$constantaddress]\t# load from constant table: double=$con" %}
+ ins_encode %{
+ __ vucomxsd($src$$XMMRegister, $constantaddress($con));
+ %}
+ ins_pipe(pipe_slow);
+%}
+
// Compare into -1,0,1
instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
%{
@@ -16808,6 +16917,21 @@ instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
ins_pipe(pipe_jcc);
%}
+// Jump Direct Conditional - using signed and unsigned comparison
+instruct jmpConUCFE(cmpOpUCFE cop, rFlagsRegUCFE cmp, label labl) %{
+ match(If cop cmp);
+ effect(USE labl);
+
+ ins_cost(200);
+ format %{ "j$cop,su $labl" %}
+ size(6);
+ ins_encode %{
+ Label* L = $labl$$label;
+ __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
+ %}
+ ins_pipe(pipe_jcc);
+%}
+
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary
// superklass array for an instance of the superklass. Set a hidden
@@ -17026,6 +17150,22 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
ins_short_branch(1);
%}
+// Jump Direct Conditional - using signed and unsigned comparison
+instruct jmpConUCFE_short(cmpOpUCFE cop, rFlagsRegUCFE cmp, label labl) %{
+ match(If cop cmp);
+ effect(USE labl);
+
+ ins_cost(300);
+ format %{ "j$cop,sus $labl" %}
+ size(2);
+ ins_encode %{
+ Label* L = $labl$$label;
+ __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
+ %}
+ ins_pipe(pipe_jcc);
+ ins_short_branch(1);
+%}
+
// ============================================================================
// inlined locking and unlocking
diff --git a/src/hotspot/cpu/zero/bytecodeInterpreter_zero.inline.hpp b/src/hotspot/cpu/zero/bytecodeInterpreter_zero.inline.hpp
index 4d813cd53c6..4c73368b673 100644
--- a/src/hotspot/cpu/zero/bytecodeInterpreter_zero.inline.hpp
+++ b/src/hotspot/cpu/zero/bytecodeInterpreter_zero.inline.hpp
@@ -26,6 +26,8 @@
#ifndef CPU_ZERO_BYTECODEINTERPRETER_ZERO_INLINE_HPP
#define CPU_ZERO_BYTECODEINTERPRETER_ZERO_INLINE_HPP
+#include "sanitizers/ub.hpp"
+
// Inline interpreter functions for zero
inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) {
@@ -40,6 +42,7 @@ inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) {
return op1 * op2;
}
+ATTRIBUTE_NO_UBSAN // IEEE-754 division by zero is well-defined
inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) {
return op1 / op2;
}
@@ -68,7 +71,7 @@ inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2],
}
inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
- return op1 + op2;
+ return java_add(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
@@ -82,7 +85,7 @@ inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
}
inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
- return op1 * op2;
+ return java_multiply(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
@@ -90,7 +93,7 @@ inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
}
inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
- return op1 - op2;
+ return java_subtract(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
@@ -104,19 +107,19 @@ inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
}
inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
- return ((unsigned long long) op1) >> (op2 & 0x3F);
+ return java_shift_right_unsigned(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
- return op1 >> (op2 & 0x3F);
+ return java_shift_right(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
- return op1 << (op2 & 0x3F);
+ return java_shift_left(op1, op2);
}
inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
- return -op;
+ return java_negate(op);
}
inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
@@ -183,8 +186,8 @@ inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
return op1 + op2;
}
+ATTRIBUTE_NO_UBSAN // IEEE-754 division by zero is well-defined
inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
- // Divide by zero... QQQ
return op1 / op2;
}
@@ -228,7 +231,7 @@ inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
// Integer Arithmetic
inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
- return op1 + op2;
+ return java_add(op1, op2);
}
inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
@@ -242,11 +245,11 @@ inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
}
inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
- return op1 * op2;
+ return java_multiply(op1, op2);
}
inline jint BytecodeInterpreter::VMintNeg(jint op) {
- return -op;
+ return java_negate(op);
}
inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
@@ -260,19 +263,19 @@ inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
}
inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
- return op1 << (op2 & 0x1F);
+ return java_shift_left(op1, op2);
}
inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
- return op1 >> (op2 & 0x1F);
+ return java_shift_right(op1, op2);
}
inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
- return op1 - op2;
+ return java_subtract(op1, op2);
}
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
- return ((juint) op1) >> (op2 & 0x1F);
+ return java_shift_right_unsigned(op1, op2);
}
inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
diff --git a/src/hotspot/cpu/zero/globals_zero.hpp b/src/hotspot/cpu/zero/globals_zero.hpp
index 6b6c6ea983c..6dc7d81275c 100644
--- a/src/hotspot/cpu/zero/globals_zero.hpp
+++ b/src/hotspot/cpu/zero/globals_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true);
define_pd_global(bool, DelayCompilerStubsGeneration, false); // Don't have compiler's stubs
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
-define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(uint, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);
diff --git a/src/hotspot/cpu/zero/register_zero.hpp b/src/hotspot/cpu/zero/register_zero.hpp
index fd30f206762..846b649eebd 100644
--- a/src/hotspot/cpu/zero/register_zero.hpp
+++ b/src/hotspot/cpu/zero/register_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -48,7 +48,6 @@ class RegisterImpl : public AbstractRegisterImpl {
};
// construction
- inline friend Register as_Register(int encoding);
VMReg as_VMReg();
// derived registers, offsets, and addresses
@@ -113,7 +112,6 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
static const int max_fpr;
};
-CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
-#define noreg ((Register)(noreg_RegisterEnumValue))
+const Register noreg = as_Register(-1);
#endif // CPU_ZERO_REGISTER_ZERO_HPP
diff --git a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
index 28c2364315e..1b20761f6e4 100644
--- a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
+++ b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
@@ -368,12 +368,15 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
goto unlock_unwind_and_return;
void **arguments;
- void *mirror; {
+ // These locals must remain on stack until call completes
+ void *mirror;
+ void *env;
+ {
arguments =
(void **) stack->alloc(handler->argument_count() * sizeof(void **));
void **dst = arguments;
- void *env = thread->jni_environment();
+ env = thread->jni_environment();
*(dst++) = &env;
if (method->is_static()) {
diff --git a/src/hotspot/os/aix/decoder_aix.hpp b/src/hotspot/os/aix/decoder_aix.hpp
index 2ba3e1c5a3a..632355ccf4e 100644
--- a/src/hotspot/os/aix/decoder_aix.hpp
+++ b/src/hotspot/os/aix/decoder_aix.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,7 +38,7 @@ class AIXDecoder: public AbstractDecoder {
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
- return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
+ return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();
diff --git a/src/hotspot/os/aix/globals_aix.hpp b/src/hotspot/os/aix/globals_aix.hpp
index 14b956235e8..473d7759063 100644
--- a/src/hotspot/os/aix/globals_aix.hpp
+++ b/src/hotspot/os/aix/globals_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,6 @@
product(bool, OptimizePollingPageLocation, true, DIAGNOSTIC, \
"Optimize the location of the polling page used for Safepoints") \
\
- /* Use 64K pages for virtual memory (shmat). */ \
- product(bool, Use64KPages, true, DIAGNOSTIC, \
- "Use 64K pages if available.") \
- \
/* Normally AIX commits memory on touch, but sometimes it is helpful to have */ \
/* explicit commit behaviour. This flag, if true, causes the VM to touch */ \
/* memory on os::commit_memory() (which normally is a noop). */ \
@@ -79,7 +75,6 @@
//
// UseLargePages means nothing, for now, on AIX.
-// Use Use64KPages or Use16MPages instead.
define_pd_global(size_t, PreTouchParallelChunkSize, 1 * G);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
diff --git a/src/hotspot/os/aix/libodm_aix.cpp b/src/hotspot/os/aix/libodm_aix.cpp
index 38e8067181a..57eee47c098 100644
--- a/src/hotspot/os/aix/libodm_aix.cpp
+++ b/src/hotspot/os/aix/libodm_aix.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2015, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,13 +63,12 @@ dynamicOdm::~dynamicOdm() {
void odmWrapper::clean_data() { if (_data) { permit_forbidden_function::free(_data); _data = nullptr; } }
-int odmWrapper::class_offset(const char *field, bool is_aix_5)
+int odmWrapper::class_offset(const char *field)
{
assert(has_class(), "initialization");
for (int i = 0; i < odm_class()->nelem; i++) {
if (strcmp(odm_class()->elem[i].elemname, field) == 0) {
int offset = odm_class()->elem[i].offset;
- if (is_aix_5) { offset += LINK_VAL_OFFSET; }
return offset;
}
}
@@ -88,11 +87,10 @@ void odmWrapper::determine_os_kernel_version(uint32_t* p_ver) {
return;
}
int voff, roff, moff, foff;
- bool is_aix_5 = (major_aix_version == 5);
- voff = odm.class_offset("ver", is_aix_5);
- roff = odm.class_offset("rel", is_aix_5);
- moff = odm.class_offset("mod", is_aix_5);
- foff = odm.class_offset("fix", is_aix_5);
+ voff = odm.class_offset("ver");
+ roff = odm.class_offset("rel");
+ moff = odm.class_offset("mod");
+ foff = odm.class_offset("fix");
if (voff == -1 || roff == -1 || moff == -1 || foff == -1) {
trcVerbose("try_determine_os_kernel_version: could not get offsets");
return;
diff --git a/src/hotspot/os/aix/libodm_aix.hpp b/src/hotspot/os/aix/libodm_aix.hpp
index 924ccaf8c51..11e67a4f5ae 100644
--- a/src/hotspot/os/aix/libodm_aix.hpp
+++ b/src/hotspot/os/aix/libodm_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@ class odmWrapper : private dynamicOdm {
CLASS_SYMBOL odm_class() { return _odm_class; }
bool has_class() { return odm_class() != (CLASS_SYMBOL)-1; }
- int class_offset(const char *field, bool is_aix_5);
+ int class_offset(const char *field);
char* data() { return _data; }
char* retrieve_obj(const char* name = nullptr) {
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index f88729cdc66..af743dc7484 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -122,12 +122,6 @@
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
-#if !defined(_AIXVERSION_610)
-extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
-extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
-extern "C" int getargs(procsinfo*, int, char*, int);
-#endif
-
#define MAX_PATH (2 * K)
// for multipage initialization error analysis (in 'g_multipage_error')
@@ -216,7 +210,7 @@ static address g_brk_at_startup = nullptr;
// shmctl(). Different shared memory regions can have different page
// sizes.
//
-// More information can be found at AIBM info center:
+// More information can be found at IBM info center:
// http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
@@ -258,10 +252,18 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +275,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -282,6 +288,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -294,6 +304,10 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Aix::physical_memory();
+}
+
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@@ -683,7 +697,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, kernel thread id: %zu).",
os::current_thread_id(), (uintx) kernel_thread_id);
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -1733,10 +1747,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return true;
}
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
+void os::remove_stack_guard_pages(char* addr, size_t size) {
// Do not call this; no need to commit stack pages on AIX.
ShouldNotReachHere();
- return true;
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -1937,11 +1950,6 @@ char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_
return nullptr;
}
-bool os::pd_release_memory_special(char* base, size_t bytes) {
- fatal("os::release_memory_special should not be called on AIX.");
- return false;
-}
-
size_t os::large_page_size() {
return _large_page_size;
}
@@ -2123,46 +2131,12 @@ void os::init(void) {
// 64k no --- AIX 5.2 ? ---
// 64k yes 64k new systems and standard java loader (we set datapsize=64k when linking)
- // We explicitly leave no option to change page size, because only upgrading would work,
- // not downgrading (if stack page size is 64k you cannot pretend its 4k).
-
- if (g_multipage_support.datapsize == 4*K) {
- // datapsize = 4K. Data segment, thread stacks are 4K paged.
- if (g_multipage_support.can_use_64K_pages || g_multipage_support.can_use_64K_mmap_pages) {
- // .. but we are able to use 64K pages dynamically.
- // This would be typical for java launchers which are not linked
- // with datapsize=64K (like, any other launcher but our own).
- //
- // In this case it would be smart to allocate the java heap with 64K
- // to get the performance benefit, and to fake 64k pages for the
- // data segment (when dealing with thread stacks).
- //
- // However, leave a possibility to downgrade to 4K, using
- // -XX:-Use64KPages.
- if (Use64KPages) {
- trcVerbose("64K page mode (faked for data segment)");
- set_page_size(64*K);
- } else {
- trcVerbose("4K page mode (Use64KPages=off)");
- set_page_size(4*K);
- }
- } else {
- // .. and not able to allocate 64k pages dynamically. Here, just
- // fall back to 4K paged mode and use mmap for everything.
- trcVerbose("4K page mode");
- set_page_size(4*K);
- FLAG_SET_ERGO(Use64KPages, false);
- }
- } else {
- // datapsize = 64k. Data segment, thread stacks are 64k paged.
- // This normally means that we can allocate 64k pages dynamically.
- // (There is one special case where this may be false: EXTSHM=on.
- // but we decided to not support that mode).
- assert0(g_multipage_support.can_use_64K_pages || g_multipage_support.can_use_64K_mmap_pages);
- set_page_size(64*K);
- trcVerbose("64K page mode");
- FLAG_SET_ERGO(Use64KPages, true);
- }
+ // datapsize = 64k. Data segment, thread stacks are 64k paged.
+ // This normally means that we can allocate 64k pages dynamically.
+ // (There is one special case where this may be false: EXTSHM=on.
+ // but we decided to not support that mode).
+ assert0(g_multipage_support.can_use_64K_pages || g_multipage_support.can_use_64K_mmap_pages);
+ set_page_size(64*K);
// For now UseLargePages is just ignored.
FLAG_SET_ERGO(UseLargePages, false);
@@ -2264,6 +2238,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;
@@ -2541,23 +2519,18 @@ void os::Aix::initialize_os_info() {
assert(minor > 0, "invalid OS release");
_os_version = (major << 24) | (minor << 16);
char ver_str[20] = {0};
- const char* name_str = "unknown OS";
- if (strcmp(uts.sysname, "AIX") == 0) {
- // We run on AIX. We do not support versions older than AIX 7.1.
- // Determine detailed AIX version: Version, Release, Modification, Fix Level.
- odmWrapper::determine_os_kernel_version(&_os_version);
- if (os_version_short() < 0x0701) {
- log_warning(os)("AIX releases older than AIX 7.1 are not supported.");
- assert(false, "AIX release too old.");
- }
- name_str = "AIX";
- jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
- major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
- } else {
- assert(false, "%s", name_str);
+ // We do not support versions older than AIX 7.2 TL 5.
+ // Determine detailed AIX version: Version, Release, Modification, Fix Level.
+ odmWrapper::determine_os_kernel_version(&_os_version);
+ if (_os_version < 0x07020500) {
+ log_warning(os)("AIX releases older than AIX 7.2 TL 5 are not supported.");
+ assert(false, "AIX release too old.");
}
- log_info(os)("We run on %s %s", name_str, ver_str);
+
+ jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
+ major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
+ log_info(os)("We run on AIX %s", ver_str);
}
guarantee(_os_version, "Could not determine AIX release");
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index aa8819d035f..cbf78083483 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -143,12 +143,6 @@ static OSReturn get_jvm_load(double* jvm_uload, double* jvm_sload) {
return OS_OK;
}
-static void update_prev_time(jvm_time_store_t* from, jvm_time_store_t* to) {
- if (from && to) {
- memcpy(to, from, sizeof(jvm_time_store_t));
- }
-}
-
static void update_prev_ticks(cpu_tick_store_t* from, cpu_tick_store_t* to) {
if (from && to) {
memcpy(to, from, sizeof(cpu_tick_store_t));
diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp
index 7311afc197b..b3f878fbfdd 100644
--- a/src/hotspot/os/aix/porting_aix.cpp
+++ b/src/hotspot/os/aix/porting_aix.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ class fixed_strings {
public:
- fixed_strings() : first(0) {}
+ fixed_strings() : first(nullptr) {}
~fixed_strings() {
node* n = first;
while (n) {
@@ -113,7 +113,7 @@ bool AixSymbols::get_function_name (
// information (null if not available)
bool demangle // [in] whether to demangle the name
) {
- struct tbtable* tb = 0;
+ struct tbtable* tb = nullptr;
unsigned int searchcount = 0;
// initialize output parameters
@@ -653,10 +653,10 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
- codeptr_t cur_iar = 0;
- stackptr_t cur_sp = 0;
- codeptr_t cur_rtoc = 0;
- codeptr_t cur_lr = 0;
+ codeptr_t cur_iar = nullptr;
+ stackptr_t cur_sp = nullptr;
+ codeptr_t cur_rtoc = nullptr;
+ codeptr_t cur_lr = nullptr;
const ucontext_t* uc = (const ucontext_t*) context;
@@ -926,7 +926,7 @@ static struct handletableentry* p_handletable = nullptr;
static const char* rtv_linkedin_libpath() {
constexpr int bufsize = 4096;
static char buffer[bufsize];
- static const char* libpath = 0;
+ static const char* libpath = nullptr;
// we only try to retrieve the libpath once. After that try we
// let libpath point to buffer, which then contains a valid libpath
diff --git a/src/hotspot/os/bsd/globals_bsd.hpp b/src/hotspot/os/bsd/globals_bsd.hpp
index 850d491a11f..22f587ed789 100644
--- a/src/hotspot/os/bsd/globals_bsd.hpp
+++ b/src/hotspot/os/bsd/globals_bsd.hpp
@@ -28,6 +28,7 @@
//
// Declare Bsd specific flags. They are not available on other platforms.
//
+#ifdef AARCH64
#define RUNTIME_OS_FLAGS(develop, \
develop_pd, \
product, \
@@ -35,9 +36,21 @@
range, \
constraint) \
\
- AARCH64_ONLY(develop(bool, AssertWXAtThreadSync, true, \
- "Conservatively check W^X thread state at possible safepoint" \
- "or handshake"))
+ develop(bool, TraceWXHealing, false, \
+ "track occurrences of W^X mode healing") \
+ develop(bool, UseOldWX, false, \
+ "Choose old W^X implementation.") \
+ product(bool, StressWXHealing, false, DIAGNOSTIC, \
+ "Stress W xor X healing on MacOS")
+
+#else
+#define RUNTIME_OS_FLAGS(develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ range, \
+ constraint)
+#endif
// end of RUNTIME_OS_FLAGS
diff --git a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
index 6fd08d63e85..30e258c9d2c 100644
--- a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
+++ b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
@@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
- return 0;
+ return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 61de48bb7fa..29ebe65e0db 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
// Available here means free. Note that this number is of no much use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Bsd::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -821,6 +841,7 @@ jlong os::javaTimeNanos() {
// We might also condition (c) on the magnitude of the delta between obsv and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
+ // https://web.archive.org/web/20131214182431/https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv;
}
@@ -1400,7 +1421,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
- if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+ if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}
@@ -1761,10 +1782,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
-// If this is a growable mapping, remove the guard pages entirely by
-// munmap()ping them. If not, just call uncommit_memory().
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- return os::uncommit_memory(addr, size);
+void os::remove_stack_guard_pages(char* addr, size_t size) {
+ os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or
@@ -1866,11 +1885,6 @@ char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_
return nullptr;
}
-bool os::pd_release_memory_special(char* base, size_t bytes) {
- fatal("os::release_memory_special should not be called on BSD.");
- return false;
-}
-
size_t os::large_page_size() {
return _large_page_size;
}
@@ -2189,6 +2203,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
return _processor_count;
}
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
index f9c6f794ebd..e49d070890e 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
-bool CgroupSubsystem::active_processor_count(int& value) {
- int cpu_count;
- int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
- CachingCgroupController* contrl = cpu_controller();
- CachedMetric* cpu_limit = contrl->metrics_cache();
+ CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+ CachedMetric<double>* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
- value = (int)cpu_limit->value();
- log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+ value = cpu_limit->value();
+ log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
return true;
}
- cpu_count = os::Linux::active_processor_count();
+ int cpu_count = os::Linux::active_processor_count();
+ double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
- CachingCgroupController* contrl = memory_controller();
- CachedMetric* memory_limit = contrl->metrics_cache();
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+ CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
index 522b64f8816..d083a9985c2 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,20 +181,21 @@ class CgroupController: public CHeapObj {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
+template <typename MetricType>
class CachedMetric : public CHeapObj{
private:
- volatile physical_memory_size_type _metric;
+ volatile MetricType _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
- _metric = value_unlimited;
+ _metric = static_cast<MetricType>(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
- physical_memory_size_type value() { return _metric; }
- void set_value(physical_memory_size_type value, jlong timeout) {
+ MetricType value() { return _metric; }
+ void set_value(MetricType value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj{
}
};
-template
+template <typename T, typename MetricType>
class CachingCgroupController : public CHeapObj {
private:
T* _controller;
- CachedMetric* _metrics_cache;
+ CachedMetric<MetricType>* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
- _metrics_cache = new CachedMetric();
+ _metrics_cache = new CachedMetric<MetricType>();
}
- CachedMetric* metrics_cache() { return _metrics_cache; }
+ CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj {
class CgroupSubsystem: public CHeapObj {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
- bool active_processor_count(int& value);
+ bool active_processor_count(double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
- virtual CachingCgroupController* memory_controller() = 0;
- virtual CachingCgroupController* cpu_controller() = 0;
+ virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+ virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp
index 7aa07d53148..570b335940b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.cpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
- int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
- int result = upper_bound;
+ double result = upper_bound;
- if (quota > -1 && period > 0) {
- quota_count = ceilf((float)quota / (float)period);
- log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+ if (quota > 0 && period > 0) { // Use quotas
+ double cpu_quota = static_cast<double>(quota) / period;
+ log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+ result = MIN2(result, cpu_quota);
}
- // Use quotas
- if (quota_count != 0) {
- limit_count = quota_count;
- }
-
- result = MIN2(upper_bound, limit_count);
- log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+ log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
value = result;
return true;
}
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
- int cpu_limit_val = -1;
+ double cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
- int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+ double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp
index d72bbd1cf1e..1fd2a7d872b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.hpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
class CgroupUtil: AllStatic {
public:
- static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+ static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
- static int get_updated_cpu_limit(CgroupCpuController* c,
- int lowest,
- int upper_bound);
+ static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
index 2df604083d2..c8f5a290c99 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController(memory);
- _cpu = new CachingCgroupController(cpu);
+ _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+ _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
}
bool CgroupV1Subsystem::is_containerized() {
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index f556bc57f26..af8d0efd378 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
- CachingCgroupController* memory_controller() override { return _memory; }
- CachingCgroupController* cpu_controller() override { return _cpu; }
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+ CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
- CachingCgroupController* _memory = nullptr;
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
- CachingCgroupController* _cpu = nullptr;
+ CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
index c61d30e9236..30e1affc646 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController(memory);
- _cpu = new CachingCgroupController(cpu);
+ _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+ _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_cpuacct = cpuacct;
}
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
index 39a4fabe9f6..998145c0ff9 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
- CachingCgroupController* _memory = nullptr;
- CachingCgroupController* _cpu = nullptr;
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
+ CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
- CachingCgroupController* memory_controller() override { return _memory; }
- CachingCgroupController* cpu_controller() override { return _cpu; }
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+ CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 25ffd0b8078..28159ae4801 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -66,9 +66,6 @@
#endif
// open(2) flags
-#ifndef O_CLOEXEC
-#define O_CLOEXEC 02000000
-#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 15a6403d07f..b46263efd99 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
- int host_cpus = os::Linux::active_processor_count();
- int cpus = host_cpus;
+ double host_cpus = os::Linux::active_processor_count();
+ double cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
-bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
- " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
- mem_swap_buf, mem_limit_buf, host_free_swap);
+ " container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
}
return false;
}
@@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
-bool OSContainer::active_processor_count(int& value) {
+bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@@ -291,11 +289,13 @@ template struct metric_fmt;
template<> struct metric_fmt { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%d"; };
+template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, int, const char*);
+template void OSContainer::print_container_metric(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, const char*, const char*);
template
@@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt::fmt, value);
- st->print("%s: %*s", metrics, max_length - static_cast(strlen(metrics)) - 2, value_str); // -2 for the ": "
- if (unit[0] != '\0') {
- st->print_cr(" %s", unit);
- } else {
- st->print_cr("");
- }
+
+ const int pad_width = max_length - static_cast(strlen(metrics)) - 2; // -2 for the ": "
+ const char* unit_prefix = unit[0] != '\0' ? " " : "";
+
+ char line[128] = {};
+ os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
+ st->print_cr("%s", line);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
index 11c3e086feb..96b59b98db8 100644
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
- static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value);
+ static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@@ -84,7 +83,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
- static bool active_processor_count(int& value);
+ static bool active_processor_count(double& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 88e5e9b582a..9c2fbab7535 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
+bool os::is_containerized() {
+ return OSContainer::is_containerized();
+}
+
+bool os::Container::memory_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::used_memory(physical_memory_size_type& value) {
+ return OSContainer::memory_usage_in_bytes(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::available_memory(value);
+}
+
+bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
+bool os::Container::available_memory(physical_memory_size_type& value) {
+ return OSContainer::available_memory_in_bytes(value);
+}
+
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::free_memory(value);
+}
+
+bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
- if (OSContainer::is_containerized()) {
- physical_memory_size_type mem_swap_limit = value_unlimited;
- physical_memory_size_type memory_limit = value_unlimited;
- if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
- OSContainer::memory_limit_in_bytes(memory_limit)) {
- if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
- mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
- value = mem_swap_limit - memory_limit;
- return true;
- }
- }
- } // fallback to the host swap space if the container returned unlimited
+ if (is_containerized() && Container::total_swap_space(value)) {
+ return true;
+ } // fallback to the host swap space if the container value fails
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
+bool os::Container::total_swap_space(physical_memory_size_type& value) {
+ physical_memory_size_type mem_swap_limit = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ OSContainer::memory_limit_in_bytes(memory_limit)) {
+ if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
+ mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
+ value = mem_swap_limit - memory_limit;
+ return true;
+ }
+ }
+ return false;
+}
+
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
- if (OSContainer::is_containerized()) {
- if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+ if (is_containerized()) {
+ if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
+
value = host_free_swap_val;
return true;
}
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
+ return host_free_swap_f(value);
+}
+
+bool os::Container::free_swap_space(physical_memory_size_type& value) {
+ return OSContainer::available_swap_in_bytes(value);
+}
+
physical_memory_size_type os::physical_memory() {
- if (OSContainer::is_containerized()) {
+ if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
- if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+ if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
- physical_memory_size_type phys_mem = Linux::physical_memory();
+ physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Linux::physical_memory();
+}
+
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
- int i = -1;
- bool supported = OSContainer::active_processor_count(i);
+ double cpus = -1;
+ bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
- assert(i > 0, "must be");
+ assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
- OSContainer::print_container_metric(st, "active_processor_count", i);
+ OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
+ int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@@ -3453,6 +3523,9 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
log_trace(os, map)("mmap failed: " RANGEFMT " errno=(%s)",
RANGEFMTARGS(addr, size),
os::strerror(ep.saved_errno()));
+ if (ep.saved_errno() == ENOMEM) {
+ fatal("Failed to uncommit " RANGEFMT ". It is possible that the process's maximum number of mappings would have been exceeded. Try increasing the limit.", RANGEFMTARGS(addr, size));
+ }
return false;
}
return true;
@@ -3563,14 +3636,16 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
// It's safe to always unmap guard pages for primordial thread because we
// always place it right after end of the mapped region.
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- uintptr_t stack_extent, stack_base;
+void os::remove_stack_guard_pages(char* addr, size_t size) {
if (os::is_primordial_thread()) {
- return ::munmap(addr, size) == 0;
+ if (::munmap(addr, size) != 0) {
+ fatal("Failed to munmap " RANGEFMT, RANGEFMTARGS(addr, size));
+ }
+ return;
}
- return os::uncommit_memory(addr, size);
+ os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or
@@ -4133,12 +4208,6 @@ char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_
return addr;
}
-bool os::pd_release_memory_special(char* base, size_t bytes) {
- assert(UseLargePages, "only for large pages");
- // Plain munmap is sufficient
- return pd_release_memory(base, bytes);
-}
-
size_t os::large_page_size() {
return _large_page_size;
}
@@ -4737,15 +4806,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
- int active_cpus = -1;
- if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
- log_trace(os)("active_processor_count: determined by OSContainer: %d",
- active_cpus);
- } else {
- active_cpus = os::Linux::active_processor_count();
+ if (is_containerized()) {
+ double cpu_quota;
+ if (Container::processor_count(cpu_quota)) {
+ int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
+ assert(active_cpus <= Machine::active_processor_count(), "must be");
+ log_trace(os)("active_processor_count: determined by OSContainer: %d",
+ active_cpus);
+ return active_cpus;
+ }
}
- return active_cpus;
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
+ return os::Linux::active_processor_count();
+}
+
+bool os::Container::processor_count(double& value) {
+ return OSContainer::active_processor_count(value);
}
static bool should_warn_invalid_processor_id() {
@@ -4878,36 +4958,18 @@ int os::open(const char *path, int oflag, int mode) {
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
- // might fork and exec without closing all appropriate file descriptors,
- // and this in turn might:
- //
- // - cause end-of-file to fail to be detected on some file
- // descriptors, resulting in mysterious hangs, or
- //
- // - might cause an fopen in the subprocess to fail on a system
- // suffering from bug 1085341.
- //
- // (Yes, the default setting of the close-on-exec flag is a Unix
- // design flaw)
- //
- // See:
- // 1085341: 32-bit stdio routines should support file descriptors >255
- // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
- // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
- //
- // Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
- // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
- // because it saves a system call and removes a small window where the flag
- // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
- // and we fall back to using FD_CLOEXEC (see below).
-#ifdef O_CLOEXEC
+ // might fork and exec without closing all appropriate file descriptors.
oflag |= O_CLOEXEC;
-#endif
int fd = ::open(path, oflag, mode);
- if (fd == -1) return -1;
+ // No further checking is needed if open() returned an error or
+ // access mode is not read only.
+ if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
+ return fd;
+ }
- //If the open succeeded, the file might still be a directory
+ // If the open succeeded and is read only, the file might be a directory
+ // which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
@@ -4925,21 +4987,6 @@ int os::open(const char *path, int oflag, int mode) {
}
}
-#ifdef FD_CLOEXEC
- // Validate that the use of the O_CLOEXEC flag on open above worked.
- // With recent kernels, we will perform this check exactly once.
- static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
- if (!O_CLOEXEC_is_known_to_work) {
- int flags = ::fcntl(fd, F_GETFD);
- if (flags != -1) {
- if ((flags & FD_CLOEXEC) != 0)
- O_CLOEXEC_is_known_to_work = 1;
- else
- ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
- }
- }
-#endif
-
return fd;
}
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 5412e2bc92d..f147ed4be93 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -906,8 +906,25 @@ FILE* os::fdopen(int fd, const char* mode) {
ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) {
ssize_t res;
+#ifdef __APPLE__
+ // macOS fails for individual write operations > 2GB.
+ // See https://gitlab.haskell.org/ghc/ghc/-/issues/17414
+ ssize_t total = 0;
+ while (nBytes > 0) {
+ size_t bytes_to_write = MIN2(nBytes, (size_t)INT_MAX);
+ RESTARTABLE(::write(fd, buf, bytes_to_write), res);
+ if (res == OS_ERR) {
+ return OS_ERR;
+ }
+ buf = (const char*)buf + res;
+ nBytes -= res;
+ total += res;
+ }
+ return total;
+#else
RESTARTABLE(::write(fd, buf, nBytes), res);
return res;
+#endif
}
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 08a19270943..d9bde6fa825 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
+ } else {
+ if (!successful_write) {
+ remove(destfile);
+ }
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@@ -490,6 +494,7 @@ static char* get_user_name(uid_t uid) {
return user_name;
}
+#ifndef __APPLE__
// return the name of the user that owns the process identified by vmid.
//
// This method uses a slow directory search algorithm to find the backing
@@ -653,6 +658,7 @@ static char* get_user_name(int vmid, int *nspid, TRAPS) {
#endif
return result;
}
+#endif
// return the file name of the backing store file for the named
// shared memory region for the given user name and vmid.
@@ -949,6 +955,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
+ remove(filename);
break;
}
}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index efbd1fe7c68..76f47640e5a 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
-bool os::total_swap_space(physical_memory_size_type& value) {
+bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return win32::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);
@@ -3257,11 +3281,10 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Do manual alignment
aligned_base = align_up(extra_base, alignment);
- bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
- os::release_memory(extra_base, extra_size);
- assert(rc, "release failed");
- if (!rc) {
- return nullptr;
+ if (file_desc != -1) {
+ os::unmap_memory(extra_base, extra_size);
+ } else {
+ os::release_memory(extra_base, extra_size);
}
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
@@ -3494,11 +3517,6 @@ char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_
return reserve_large_pages(bytes, addr, exec);
}
-bool os::pd_release_memory_special(char* base, size_t bytes) {
- assert(base != nullptr, "Sanity check");
- return pd_release_memory(base, bytes);
-}
-
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
int err = os::get_last_error();
char buf[256];
@@ -3657,8 +3675,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- return os::uncommit_memory(addr, size);
+void os::remove_stack_guard_pages(char* addr, size_t size) {
+ os::uncommit_memory(addr, size);
}
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index afef21b091a..3ab81697280 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
-#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
-#else
- __mtfsfi(6, 0);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
index c741335b5f0..d9dac0e231f 100644
--- a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
+++ b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbt(((address)loc) +((long)interval));
-#endif
}
inline void Prefetch::write(void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbtst( ((address)loc) +((long)interval) );
-#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index 62dba218b2f..36599594842 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -54,8 +54,11 @@
#include "signals_posix.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
+#include "utilities/decoder.hpp"
#include "utilities/events.hpp"
+#include "utilities/nativeStackPrinter.hpp"
#include "utilities/vmError.hpp"
+#include "compiler/disassembler.hpp"
// put OS-includes here
# include
@@ -85,6 +88,8 @@
#define SPELL_REG_SP "sp"
#ifdef __APPLE__
+WXMode DefaultWXWriteMode;
+
// see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes
@@ -233,19 +238,56 @@ NOINLINE frame os::current_frame() {
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {
- // Enable WXWrite: this function is called by the signal handler at arbitrary
- // point of execution.
- ThreadWXEnable wx(WXWrite, thread);
-
// decide if this trap can be handled by a stub
address stub = nullptr;
-
- address pc = nullptr;
+ address pc = nullptr;
//%note os_trap_1
if (info != nullptr && uc != nullptr && thread != nullptr) {
pc = (address) os::Posix::ucontext_get_pc(uc);
+#ifdef MACOS_AARCH64
+ // If we got a SIGBUS because we tried to write into the code
+ // cache, try enabling WXWrite mode.
+ if (sig == SIGBUS
+ && pc != info->si_addr
+ && CodeCache::contains(info->si_addr)
+ && os::address_is_in_vm(pc)) {
+ WXMode *entry_mode = thread->_cur_wx_mode;
+ if (entry_mode != nullptr && *entry_mode == WXArmedForWrite) {
+ if (TraceWXHealing) {
+ static const char *mode_names[3] = {"WXWrite", "WXExec", "WXArmedForWrite"};
+ tty->print("Healing WXMode %s at %p to WXWrite",
+ mode_names[*entry_mode], entry_mode);
+ char name[128];
+ int offset = 0;
+ if (os::dll_address_to_function_name(pc, name, sizeof name, &offset)) {
+ tty->print_cr(" (%s+0x%x)", name, offset);
+ } else {
+ tty->cr();
+ }
+ if (Verbose) {
+ char buf[O_BUFLEN];
+ NativeStackPrinter nsp(thread);
+ nsp.print_stack(tty, buf, sizeof(buf), pc,
+ true /* print_source_info */, -1 /* max stack */);
+ }
+ }
+#ifndef PRODUCT
+ guarantee(StressWXHealing,
+ "We should not reach here unless StressWXHealing");
+#endif
+ *(thread->_cur_wx_mode) = WXWrite;
+ return thread->wx_enable_write();
+ }
+ }
+
+ // There may be cases where code after this point that we call
+ // from the signal handler changes WX state, so we protect against
+ // that by saving and restoring the state.
+ ThreadWXEnable wx(thread->get_wx_state(), thread);
+#endif
+
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
address addr = (address) info->si_addr;
@@ -515,11 +557,42 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
-#ifdef __APPLE__
+#ifdef MACOS_AARCH64
+THREAD_LOCAL bool os::_jit_exec_enabled;
+
+// This is a wrapper around the standard library function
+// pthread_jit_write_protect_np(3). We keep track of the state of
+// per-thread write protection on the MAP_JIT region in the
+// thread-local variable os::_jit_exec_enabled
void os::current_thread_enable_wx(WXMode mode) {
- pthread_jit_write_protect_np(mode == WXExec);
+ bool exec_enabled = mode != WXWrite;
+ if (exec_enabled != _jit_exec_enabled NOT_PRODUCT( || DefaultWXWriteMode == WXWrite)) {
+ permit_forbidden_function::pthread_jit_write_protect_np(exec_enabled);
+ _jit_exec_enabled = exec_enabled;
+ }
}
-#endif
+
+// If the current thread is in the WX state WXArmedForWrite, change
+// the state to WXWrite.
+bool Thread::wx_enable_write() {
+ if (_wx_state == WXArmedForWrite) {
+ _wx_state = WXWrite;
+ os::current_thread_enable_wx(WXWrite);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// A wrapper around wx_enable_write() for when the current thread is
+// not known.
+void os::thread_wx_enable_write_impl() {
+ if (!StressWXHealing) {
+ Thread::current()->wx_enable_write();
+ }
+}
+
+#endif // MACOS_AARCH64
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
diff --git a/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
index 3dc0035ed87..2b96e978980 100644
--- a/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
@@ -42,8 +42,19 @@ frame JavaThread::pd_last_frame() {
void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
+#if INCLUDE_G1GC
+ if (bs->is_a(BarrierSet::G1BarrierSet)) {
+ _card_table_base = nullptr;
+ } else
+#endif
+#if INCLUDE_SHENANDOAHGC
+ if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
+ _card_table_base = nullptr;
+ } else
+#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
- _card_table_base = (address) (barrier_set_cast(bs)->card_table()->byte_map_base());
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ _card_table_base = (address)ctbs->card_table_base_const();
} else {
_card_table_base = nullptr;
}
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index 07f53582a76..ee08738c678 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
+#include "runtime/vm_version.hpp"
// put OS-includes here
# include
@@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
+// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
+#define XSAVE_HDR_OFFSET 512
+#define XFEATURE_APX (1ULL << 19)
+
+// XSAVE header structure
+// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
+// Also: Linux kernel arch/x86/include/asm/fpu/types.h
+struct xstate_header {
+ uint64_t xfeatures;
+ uint64_t xcomp_bv;
+ uint64_t reserved[6];
+};
+
+// APX extended state - R16-R31 (16 x 64-bit registers)
+// See: Intel APX Architecture Specification
+struct apx_state {
+ uint64_t regs[16]; // r16-r31
+};
+
+static apx_state* get_apx_state(const ucontext_t* uc) {
+ uint32_t offset = VM_Version::apx_xstate_offset();
+ if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
+ return nullptr;
+ }
+
+ char* xsave = (char*)uc->uc_mcontext.fpregs;
+ xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
+
+ // Check if APX state is present in this context
+ if (!(hdr->xfeatures & XFEATURE_APX)) {
+ return nullptr;
+ }
+
+ return (apx_state*)(xsave + offset);
+}
+
+
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
+ // Dump APX EGPRs (R16-R31)
+ apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
+ if (apx != nullptr) {
+ for (int i = 0; i < 16; i++) {
+ st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
+ if (i % 4 == 3) st->cr();
+ }
+ }
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
- const int register_count = 16;
+ if (context == nullptr) {
+ return;
+ }
+ const ucontext_t *uc = (const ucontext_t*)context;
+ apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
+
+ const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
- if (context == nullptr || n == register_count) {
+ if (n == register_count) {
return;
}
- const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
+
+ if (n < 16) {
+ // Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
- switch (n) {
- CASE_PRINT_REG( 0, "RAX=", RAX); break;
- CASE_PRINT_REG( 1, "RBX=", RBX); break;
- CASE_PRINT_REG( 2, "RCX=", RCX); break;
- CASE_PRINT_REG( 3, "RDX=", RDX); break;
- CASE_PRINT_REG( 4, "RSP=", RSP); break;
- CASE_PRINT_REG( 5, "RBP=", RBP); break;
- CASE_PRINT_REG( 6, "RSI=", RSI); break;
- CASE_PRINT_REG( 7, "RDI=", RDI); break;
- CASE_PRINT_REG( 8, "R8 =", R8); break;
- CASE_PRINT_REG( 9, "R9 =", R9); break;
- CASE_PRINT_REG(10, "R10=", R10); break;
- CASE_PRINT_REG(11, "R11=", R11); break;
- CASE_PRINT_REG(12, "R12=", R12); break;
- CASE_PRINT_REG(13, "R13=", R13); break;
- CASE_PRINT_REG(14, "R14=", R14); break;
- CASE_PRINT_REG(15, "R15=", R15); break;
- }
+ switch (n) {
+ CASE_PRINT_REG( 0, "RAX=", RAX); break;
+ CASE_PRINT_REG( 1, "RBX=", RBX); break;
+ CASE_PRINT_REG( 2, "RCX=", RCX); break;
+ CASE_PRINT_REG( 3, "RDX=", RDX); break;
+ CASE_PRINT_REG( 4, "RSP=", RSP); break;
+ CASE_PRINT_REG( 5, "RBP=", RBP); break;
+ CASE_PRINT_REG( 6, "RSI=", RSI); break;
+ CASE_PRINT_REG( 7, "RDI=", RDI); break;
+ CASE_PRINT_REG( 8, "R8 =", R8); break;
+ CASE_PRINT_REG( 9, "R9 =", R9); break;
+ CASE_PRINT_REG(10, "R10=", R10); break;
+ CASE_PRINT_REG(11, "R11=", R11); break;
+ CASE_PRINT_REG(12, "R12=", R12); break;
+ CASE_PRINT_REG(13, "R13=", R13); break;
+ CASE_PRINT_REG(14, "R14=", R14); break;
+ CASE_PRINT_REG(15, "R15=", R15); break;
+ }
# undef CASE_PRINT_REG
+ } else {
+ // APX extended general purpose registers (R16-R31)
+ st->print("R%d=", n);
+ print_location(st, apx->regs[n - 16]);
+ }
++n;
}
}
diff --git a/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp
index a20feadcba4..93beb549366 100644
--- a/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/vm_version_windows_aarch64.cpp
@@ -27,22 +27,30 @@
#include "runtime/vm_version.hpp"
int VM_Version::get_current_sve_vector_length() {
- assert(_features & CPU_SVE, "should not call this");
+ assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
}
int VM_Version::set_and_get_current_sve_vector_length(int length) {
- assert(_features & CPU_SVE, "should not call this");
+ assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
}
void VM_Version::get_os_cpu_info() {
- if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) _features |= CPU_CRC32;
- if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) _features |= CPU_AES | CPU_SHA1 | CPU_SHA2;
- if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) _features |= CPU_ASIMD;
+ if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) {
+ set_feature(CPU_CRC32);
+ }
+ if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) {
+ set_feature(CPU_AES);
+ set_feature(CPU_SHA1);
+ set_feature(CPU_SHA2);
+ }
+ if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) {
+ set_feature(CPU_ASIMD);
+ }
// No check for CPU_PMULL, CPU_SVE, CPU_SVE2
__int64 dczid_el0 = _ReadStatusReg(0x5807 /* ARM64_DCZID_EL0 */);
diff --git a/src/hotspot/share/adlc/adlArena.cpp b/src/hotspot/share/adlc/adlArena.cpp
index ebd1f74911d..e3ae60e91a9 100644
--- a/src/hotspot/share/adlc/adlArena.cpp
+++ b/src/hotspot/share/adlc/adlArena.cpp
@@ -136,9 +136,9 @@ void *AdlArena::Acalloc( size_t items, size_t x ) {
}
//------------------------------realloc----------------------------------------
-static size_t pointer_delta(const void *left, const void *right) {
- assert(left >= right, "pointer delta underflow");
- return (uintptr_t)left - (uintptr_t)right;
+static size_t pointer_delta(const void* high, const void* low) {
+ assert(high >= low, "pointer delta underflow");
+ return (uintptr_t)high - (uintptr_t)low;
}
// Reallocate storage in AdlArena.
diff --git a/src/hotspot/share/adlc/main.cpp b/src/hotspot/share/adlc/main.cpp
index 4e8a96617e8..8e6ea5bbec9 100644
--- a/src/hotspot/share/adlc/main.cpp
+++ b/src/hotspot/share/adlc/main.cpp
@@ -213,6 +213,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._VM_file._name));
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
+ AD.addInclude(AD._CPP_file, "code/aotCodeCache.hpp");
AD.addInclude(AD._CPP_file, "code/codeCache.hpp");
AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
AD.addInclude(AD._CPP_file, "code/nativeInst.hpp");
@@ -257,6 +258,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_PEEPHOLE_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_PIPELINE_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._DFA_file, "adfiles", get_basename(AD._HPP_file._name));
+ AD.addInclude(AD._DFA_file, "code/aotCodeCache.hpp");
AD.addInclude(AD._DFA_file, "oops/compressedOops.hpp");
AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp"); // Use PROB_MAX in predicate.
AD.addInclude(AD._DFA_file, "opto/intrinsicnode.hpp");
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index 7871134e923..ba525588f32 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,6 +98,8 @@ CodeBuffer::CodeBuffer(const CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(
}
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
+
// Always allow for empty slop around each section.
int slop = (int) CodeSection::end_slop();
@@ -466,9 +468,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
assert(!_finalize_stubs, "non-finalized stubs");
{
- // not sure why this is here, but why not...
- int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
- assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
+ assert( (dest->_total_start - _insts.start()) % CodeEntryAlignment == 0, "copy must preserve alignment");
}
const CodeSection* prev_cs = nullptr;
diff --git a/src/hotspot/share/asm/register.hpp b/src/hotspot/share/asm/register.hpp
index f406995b8ac..95c7c7922cf 100644
--- a/src/hotspot/share/asm/register.hpp
+++ b/src/hotspot/share/asm/register.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,15 +49,7 @@ class AbstractRegisterImpl {
// Macros to help define all kinds of registers
-#ifndef USE_POINTERS_TO_REGISTER_IMPL_ARRAY
-
-#define AS_REGISTER(type,name) ((type)name##_##type##EnumValue)
-
-#define CONSTANT_REGISTER_DECLARATION(type, name, value) \
-const type name = ((type)value); \
-enum { name##_##type##EnumValue = (value) }
-
-#else // USE_POINTERS_TO_REGISTER_IMPL_ARRAY
+#ifdef USE_POINTERS_TO_REGISTER_IMPL_ARRAY
#define REGISTER_IMPL_DECLARATION(type, impl_type, reg_count) \
inline constexpr type as_ ## type(int encoding) { \
@@ -69,16 +61,8 @@ inline constexpr type impl_type::first() { return all_ ## type ## s + 1; }
#define REGISTER_IMPL_DEFINITION(type, impl_type, reg_count) \
impl_type all_ ## type ## s[reg_count + 1];
-#define CONSTANT_REGISTER_DECLARATION(type, name, value) \
-constexpr type name = as_ ## type(value);
-
#endif // USE_POINTERS_TO_REGISTER_IMPL_ARRAY
-
-#define REGISTER_DECLARATION(type, name, value) \
-const type name = ((type)value)
-
-
// For definitions of RegisterImpl* instances. To be redefined in an
// OS-specific way.
#ifdef __GNUC__
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index a4c956ff5be..63764dd113a 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -541,6 +541,7 @@ extern void vm_exit(int code);
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
+ MACOS_AARCH64_ONLY(current->wx_enable_write());
Handle exception(current, ex);
// This function is called when we are about to throw an exception. Therefore,
diff --git a/src/hotspot/share/cds/aotClassLocation.cpp b/src/hotspot/share/cds/aotClassLocation.cpp
index f77aac3540c..9275b914ef9 100644
--- a/src/hotspot/share/cds/aotClassLocation.cpp
+++ b/src/hotspot/share/cds/aotClassLocation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -426,7 +426,8 @@ bool AOTClassLocation::check(const char* runtime_path, bool has_aot_linked_class
bool size_differs = _filesize != st.st_size;
bool time_differs = _check_time && (_timestamp != st.st_mtime);
if (size_differs || time_differs) {
- aot_log_warning(aot)("This file is not the one used while building the shared archive file: '%s'%s%s",
+ aot_log_warning(aot)("This file is not the one used while building the %s: '%s'%s%s",
+ CDSConfig::type_of_archive_being_loaded(),
runtime_path,
time_differs ? ", timestamp has changed" : "",
size_differs ? ", size has changed" : "");
@@ -448,6 +449,13 @@ void AOTClassLocationConfig::dumptime_init(JavaThread* current) {
java_lang_Throwable::print(current->pending_exception(), tty);
vm_exit_during_initialization("AOTClassLocationConfig::dumptime_init_helper() failed unexpectedly");
}
+
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ // The _max_used_index is usually updated by ClassLoader::record_result(). However,
+ // when dumping the final archive, the classes are loaded from their images in
+ // the AOT config file, so we don't go through ClassLoader::record_result().
+ dumptime_update_max_used_index(runtime()->_max_used_index); // Same value as recorded in the training run.
+ }
}
void AOTClassLocationConfig::dumptime_init_helper(TRAPS) {
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CallTypeDataInterface.java b/src/hotspot/share/cds/aotCompressedPointers.cpp
similarity index 75%
rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CallTypeDataInterface.java
rename to src/hotspot/share/cds/aotCompressedPointers.cpp
index 0a8bf4721e4..c3efa7a7185 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CallTypeDataInterface.java
+++ b/src/hotspot/share/cds/aotCompressedPointers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,14 +22,9 @@
*
*/
-package sun.jvm.hotspot.oops;
+#include "cds/aotCompressedPointers.hpp"
+#include "cds/archiveBuilder.hpp"
-public interface CallTypeDataInterface {
- int numberOfArguments();
- boolean hasArguments();
- K argumentType(int i);
- boolean hasReturn();
- K returnType();
- int argumentTypeIndex(int i);
- int returnTypeIndex();
+size_t AOTCompressedPointers::compute_byte_offset(address p) {
+ return ArchiveBuilder::current()->any_to_offset(p);
}
diff --git a/src/hotspot/share/cds/aotCompressedPointers.hpp b/src/hotspot/share/cds/aotCompressedPointers.hpp
new file mode 100644
index 00000000000..a3919fb95a9
--- /dev/null
+++ b/src/hotspot/share/cds/aotCompressedPointers.hpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
+#define SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
+
+#include "cds/cds_globals.hpp"
+#include "memory/allStatic.hpp"
+#include "memory/metaspace.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class AOTCompressedPointers: public AllStatic {
+public:
+ // For space saving, we can encode the location of metadata objects in the "rw" and "ro"
+ // regions using a 32-bit offset from the bottom of the mapped AOT metaspace. Since metadata
+ // objects are 8-byte aligned, we store scaled offset units (offset_bytes >> 3) to address
+ // up to ~32GB on 64-bit platforms. We currently limit the MaxMetadataOffsetBytes to about
+ // 3.5 GB to be compatible with +CompactObjectHeaders.
+ enum class narrowPtr : u4;
+ static constexpr size_t MetadataOffsetShift = LP64_ONLY(3) NOT_LP64(0);
+ static constexpr size_t MaxMetadataOffsetBytes = LP64_ONLY(3584ULL * M) NOT_LP64(0x7FFFFFFF);
+
+ // Convert the encoded narrowPtr to a byte offset by applying the shift.
+ inline static size_t get_byte_offset(narrowPtr narrowp) {
+ return ((size_t)checked_cast<u4>(narrowp)) << MetadataOffsetShift;
+ }
+
+ inline static narrowPtr null() {
+ return static_cast<narrowPtr>(0);
+ }
+
+ // Encoding ------
+
+ // ptr can point to one of the following
+ // - an object in the ArchiveBuilder's buffer.
+ // - an object in the currently mapped AOT cache rw/ro regions.
+ // - an object that has been copied into the ArchiveBuilder's buffer.
+ template <typename T>
+ static narrowPtr encode_not_null(T ptr) {
+ address p = reinterpret_cast<address>(ptr);
+ return encode_byte_offset(compute_byte_offset(p));
+ }
+
+ template <typename T>
+ static narrowPtr encode(T ptr) { // may be null
+ if (ptr == nullptr) {
+ return null();
+ } else {
+ return encode_not_null(ptr);
+ }
+ }
+
+ // ptr must be in the currently mapped AOT cache rw/ro regions.
+ template <typename T>
+ static narrowPtr encode_address_in_cache(T ptr) {
+ assert(Metaspace::in_aot_cache(ptr), "must be");
+ address p = reinterpret_cast<address>(ptr);
+ address base = reinterpret_cast<address>(SharedBaseAddress);
+ return encode_byte_offset(pointer_delta(p, base, 1));
+ }
+
+ template <typename T>
+ static narrowPtr encode_address_in_cache_or_null(T ptr) {
+ if (ptr == nullptr) {
+ return null();
+ } else {
+ return encode_address_in_cache(ptr);
+ }
+ }
+
+ // Decoding -----
+
+ // If base_address is null, decode an address within the mapped aot cache range.
+ template <typename T>
+ static T decode_not_null(narrowPtr narrowp, address base_address = nullptr) {
+ assert(narrowp != null(), "sanity");
+ if (base_address == nullptr) {
+ T p = reinterpret_cast<T>(reinterpret_cast<address>(SharedBaseAddress) + get_byte_offset(narrowp));
+ assert(Metaspace::in_aot_cache(p), "must be");
+ return p;
+ } else {
+ // This is usually called before the cache is fully mapped.
+ return reinterpret_cast<T>(base_address + get_byte_offset(narrowp));
+ }
+ }
+
+ template <typename T>
+ static T decode(narrowPtr narrowp, address base_address = nullptr) { // may be null
+ if (narrowp == null()) {
+ return nullptr;
+ } else {
+ return decode_not_null(narrowp, base_address);
+ }
+ }
+
+private:
+ static size_t compute_byte_offset(address p);
+
+ static narrowPtr encode_byte_offset(size_t offset) {
+ assert(offset != 0, "offset 0 is in protection zone");
+ precond(offset <= MaxMetadataOffsetBytes);
+ assert(is_aligned(offset, (size_t)1 << MetadataOffsetShift), "offset not aligned");
+ return checked_cast<narrowPtr>(offset >> MetadataOffsetShift);
+ }
+};
+
+// Type casts -- declared as global functions to save a few keystrokes
+
+// A simple type cast. No change in numerical value.
+inline AOTCompressedPointers::narrowPtr cast_from_u4(u4 narrowp) {
+ return checked_cast<AOTCompressedPointers::narrowPtr>(narrowp);
+}
+
+// A simple type cast. No change in numerical value.
+// !!!DO NOT CALL THIS if you want a byte offset!!!
+inline u4 cast_to_u4(AOTCompressedPointers::narrowPtr narrowp) {
+ return checked_cast<u4>(narrowp);
+}
+
+#endif // SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp b/src/hotspot/share/cds/aotGrowableArray.cpp
similarity index 62%
rename from src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
rename to src/hotspot/share/cds/aotGrowableArray.cpp
index f6d47acdc01..ec63e7aa57f 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
+++ b/src/hotspot/share/cds/aotGrowableArray.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,17 +22,13 @@
*
*/
-#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
-#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#include "cds/aotGrowableArray.hpp"
+#include "cds/aotMetaspace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/growableArray.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
-
-#include "gc/shared/gc_globals.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/oopsHierarchy.hpp"
-
-inline bool G1CMObjArrayProcessor::should_be_sliced(oop obj) {
- return obj->is_objArray() && ((objArrayOop)obj)->size() >= 2 * ObjArrayMarkingStride;
+void AOTGrowableArrayHelper::deallocate(void* mem) {
+ if (!AOTMetaspace::in_aot_cache(mem)) {
+ GrowableArrayCHeapAllocator::deallocate(mem);
+ }
}
-
-#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
diff --git a/src/hotspot/share/cds/aotGrowableArray.hpp b/src/hotspot/share/cds/aotGrowableArray.hpp
new file mode 100644
index 00000000000..0a0c137ed07
--- /dev/null
+++ b/src/hotspot/share/cds/aotGrowableArray.hpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTGROWABLEARRAY_HPP
+#define SHARE_CDS_AOTGROWABLEARRAY_HPP
+
+#include
+#include
+
+class AOTGrowableArrayHelper {
+public:
+ static void deallocate(void* mem);
+};
+
+// An AOTGrowableArray provides the same functionality as a GrowableArray that
+// uses the C heap allocator. In addition, AOTGrowableArray can be iterated with
+// MetaspaceClosure. This type should be used for growable arrays that need to be
+// stored in the AOT cache. See ModuleEntry::_reads for an example.
+template <typename E>
+class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
+ friend class VMStructs;
+ friend class GrowableArrayWithAllocator<E, AOTGrowableArray<E>>;
+
+ static E* allocate(int max, MemTag mem_tag) {
+ return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
+ }
+
+ E* allocate() {
+ return allocate(this->_capacity, mtClass);
+ }
+
+ void deallocate(E* mem) {
+#if INCLUDE_CDS
+ AOTGrowableArrayHelper::deallocate(mem);
+#else
+ GrowableArrayCHeapAllocator::deallocate(mem);
+#endif
+ }
+
+public:
+ AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
+ GrowableArrayWithAllocator<E, AOTGrowableArray<E>>(
+ allocate(initial_capacity, mem_tag),
+ initial_capacity) {}
+
+ AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
+ static bool is_read_only_by_default() { return false; }
+};
+
+#endif // SHARE_CDS_AOTGROWABLEARRAY_HPP
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodDataInterface.java b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
similarity index 66%
rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodDataInterface.java
rename to src/hotspot/share/cds/aotGrowableArray.inline.hpp
index 8e6b131ee9d..8c6e8cb6503 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodDataInterface.java
+++ b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,18 +22,16 @@
*
*/
-package sun.jvm.hotspot.oops;
+#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
+#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+#include "cds/aotGrowableArray.hpp"
-public interface MethodDataInterface {
- K getKlassAtAddress(Address addr);
- M getMethodAtAddress(Address addr);
- void printKlassValueOn(K klass, PrintStream st);
- void printMethodValueOn(M klass, PrintStream st);
+#include "memory/metaspaceClosure.hpp"
+
+template <typename E>
+void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}
+
+#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index a252eae4b84..fa769aee1bf 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@@ -86,7 +88,7 @@ void AOTMapLogger::ergo_initialize() {
}
void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
- ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info,
+ AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info,
char* bitmap, size_t bitmap_size_in_bytes) {
_is_runtime_logging = false;
_buffer_to_requested_delta = ArchiveBuilder::current()->buffer_to_requested_delta();
@@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
- info._type = ref->msotype();
+ info._type = ref->type();
_objs.append(info);
}
@@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
- info._type = src_info->msotype();
+ info._type = src_info->type();
objs.append(info);
}
@@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
- MetaspaceObj::Type type = info._type;
- const char* type_name = MetaspaceObj::type_name(type);
+ MetaspaceClosureType type = info._type;
+ const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
- case MetaspaceObj::ClassType:
+ case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolType:
+ case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolCacheType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstMethodType:
+ case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodType:
+ case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodCountersType:
+ case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodDataType:
+ case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::SymbolType:
+ case MetaspaceClosureType::ModuleEntryType:
+ log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::PackageEntryType:
+ log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::GrowableArrayType:
+ log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::KlassTrainingDataType:
+ case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodTrainingDataType:
+ case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::CompileTrainingDataType:
+ case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
+void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
+ mod->name_as_C_string());
+}
+
+void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
+ pkg->module()->name_as_C_string(), pkg->name_as_C_string());
+}
+
+void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
+ arr->length(), arr->capacity());
+}
+
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
@@ -791,7 +823,7 @@ public:
}
}; // AOTMapLogger::ArchivedFieldPrinter
-void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) {
+void AOTMapLogger::dumptime_log_mapped_heap_region(AOTMappedHeapInfo* heap_info) {
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
@@ -803,7 +835,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
log_archived_objects(AOTMappedHeapWriter::oop_iterator(heap_info));
}
-void AOTMapLogger::dumptime_log_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) {
+void AOTMapLogger::dumptime_log_streamed_heap_region(AOTStreamedHeapInfo* heap_info) {
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
diff --git a/src/hotspot/share/cds/aotMapLogger.hpp b/src/hotspot/share/cds/aotMapLogger.hpp
index ba188514861..f495ed97f40 100644
--- a/src/hotspot/share/cds/aotMapLogger.hpp
+++ b/src/hotspot/share/cds/aotMapLogger.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,18 +28,23 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
-class ArchiveMappedHeapInfo;
-class ArchiveStreamedHeapInfo;
+class AOTMappedHeapInfo;
+class AOTStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
+class GrowableArrayBase;
class KlassTrainingData;
+class MethodCounters;
class MethodTrainingData;
+class ModuleEntry;
class outputStream;
+class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
- MetaspaceObj::Type _type;
+ MetaspaceClosureType _type;
};
public:
@@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);
@@ -149,8 +157,8 @@ private:
#if INCLUDE_CDS_JAVA_HEAP
- static void dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* mapped_heap_info);
- static void dumptime_log_streamed_heap_region(ArchiveStreamedHeapInfo* streamed_heap_info);
+ static void dumptime_log_mapped_heap_region(AOTMappedHeapInfo* mapped_heap_info);
+ static void dumptime_log_streamed_heap_region(AOTStreamedHeapInfo* streamed_heap_info);
static void runtime_log_heap_region(FileMapInfo* mapinfo);
static void print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool print_location = true);
@@ -165,7 +173,7 @@ public:
static bool is_logging_at_bootstrap() { return _is_logging_at_bootstrap; }
static void dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
- ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info,
+ AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info,
char* bitmap, size_t bitmap_size_in_bytes);
static void runtime_log(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo);
};
diff --git a/src/hotspot/share/cds/aotMappedHeap.cpp b/src/hotspot/share/cds/aotMappedHeap.cpp
new file mode 100644
index 00000000000..ba24c43eea1
--- /dev/null
+++ b/src/hotspot/share/cds/aotMappedHeap.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotMappedHeap.hpp"
+
+// Anything that goes in the header must be thoroughly purged from uninitialized memory
+// as it will be written to disk. Therefore, the constructors memset the memory to 0.
+// This is not the prettiest thing, but we need to know every byte is initialized,
+// including potential padding between fields.
+
+AOTMappedHeapHeader::AOTMappedHeapHeader(size_t ptrmap_start_pos,
+ size_t oopmap_start_pos,
+ HeapRootSegments root_segments) {
+ memset((char*)this, 0, sizeof(*this));
+ _ptrmap_start_pos = ptrmap_start_pos;
+ _oopmap_start_pos = oopmap_start_pos;
+ _root_segments = root_segments;
+}
+
+AOTMappedHeapHeader::AOTMappedHeapHeader() {
+ memset((char*)this, 0, sizeof(*this));
+}
+
+AOTMappedHeapHeader AOTMappedHeapInfo::create_header() {
+ return AOTMappedHeapHeader{_ptrmap_start_pos,
+ _oopmap_start_pos,
+ _root_segments};
+}
diff --git a/src/hotspot/share/cds/aotMappedHeap.hpp b/src/hotspot/share/cds/aotMappedHeap.hpp
new file mode 100644
index 00000000000..307451b24d4
--- /dev/null
+++ b/src/hotspot/share/cds/aotMappedHeap.hpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTMAPPEDHEAP_HPP
+#define SHARE_CDS_AOTMAPPEDHEAP_HPP
+
+#include "cds/aotMapLogger.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
+
+class AOTMappedHeapHeader {
+ size_t _ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
+ size_t _oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
+ HeapRootSegments _root_segments; // Heap root segments info
+
+public:
+ AOTMappedHeapHeader();
+ AOTMappedHeapHeader(size_t ptrmap_start_pos,
+ size_t oopmap_start_pos,
+ HeapRootSegments root_segments);
+
+ size_t ptrmap_start_pos() const { return _ptrmap_start_pos; }
+ size_t oopmap_start_pos() const { return _oopmap_start_pos; }
+ HeapRootSegments root_segments() const { return _root_segments; }
+
+ // This class is trivially copyable and assignable.
+ AOTMappedHeapHeader(const AOTMappedHeapHeader&) = default;
+ AOTMappedHeapHeader& operator=(const AOTMappedHeapHeader&) = default;
+};
+
+class AOTMappedHeapInfo {
+ MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
+ CHeapBitMap _oopmap;
+ CHeapBitMap _ptrmap;
+ HeapRootSegments _root_segments;
+ size_t _oopmap_start_pos; // How many zeros were removed from the beginning of the bit map?
+ size_t _ptrmap_start_pos; // How many zeros were removed from the beginning of the bit map?
+
+public:
+ AOTMappedHeapInfo() :
+ _buffer_region(),
+ _oopmap(128, mtClassShared),
+ _ptrmap(128, mtClassShared),
+ _root_segments(),
+ _oopmap_start_pos(),
+ _ptrmap_start_pos() {}
+ bool is_used() { return !_buffer_region.is_empty(); }
+
+ MemRegion buffer_region() { return _buffer_region; }
+ void set_buffer_region(MemRegion r) { _buffer_region = r; }
+
+ char* buffer_start() { return (char*)_buffer_region.start(); }
+ size_t buffer_byte_size() { return _buffer_region.byte_size(); }
+
+ CHeapBitMap* oopmap() { return &_oopmap; }
+ CHeapBitMap* ptrmap() { return &_ptrmap; }
+
+ void set_oopmap_start_pos(size_t start_pos) { _oopmap_start_pos = start_pos; }
+ void set_ptrmap_start_pos(size_t start_pos) { _ptrmap_start_pos = start_pos; }
+
+ void set_root_segments(HeapRootSegments segments) { _root_segments = segments; };
+ HeapRootSegments root_segments() { return _root_segments; }
+
+ AOTMappedHeapHeader create_header();
+};
+
+#if INCLUDE_CDS_JAVA_HEAP
+class AOTMappedHeapOopIterator : public AOTMapLogger::OopDataIterator {
+protected:
+ address _current;
+ address _next;
+
+ address _buffer_start;
+ address _buffer_end;
+ uint64_t _buffer_start_narrow_oop;
+ intptr_t _buffer_to_requested_delta;
+ int _requested_shift;
+
+ size_t _num_root_segments;
+ size_t _num_obj_arrays_logged;
+
+public:
+ AOTMappedHeapOopIterator(address buffer_start,
+ address buffer_end,
+ address requested_base,
+ address requested_start,
+ int requested_shift,
+ size_t num_root_segments)
+ : _current(nullptr),
+ _next(buffer_start),
+ _buffer_start(buffer_start),
+ _buffer_end(buffer_end),
+ _requested_shift(requested_shift),
+ _num_root_segments(num_root_segments),
+ _num_obj_arrays_logged(0) {
+ _buffer_to_requested_delta = requested_start - buffer_start;
+ _buffer_start_narrow_oop = 0xdeadbeed;
+ if (UseCompressedOops) {
+ _buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
+ assert(_buffer_start_narrow_oop < 0xffffffff, "sanity");
+ }
+ }
+
+ virtual AOTMapLogger::OopData capture(address buffered_addr) = 0;
+
+ bool has_next() override {
+ return _next < _buffer_end;
+ }
+
+ AOTMapLogger::OopData next() override {
+ _current = _next;
+ AOTMapLogger::OopData result = capture(_current);
+ if (result._klass->is_objArray_klass()) {
+ result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
+ }
+ _next = _current + result._size * BytesPerWord;
+ return result;
+ }
+
+ AOTMapLogger::OopData obj_at(narrowOop* addr) override {
+ uint64_t n = (uint64_t)(*addr);
+ if (n == 0) {
+ return null_data();
+ } else {
+ precond(n >= _buffer_start_narrow_oop);
+ address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
+ return capture(buffer_addr);
+ }
+ }
+
+ AOTMapLogger::OopData obj_at(oop* addr) override {
+ address requested_value = cast_from_oop<address>(*addr);
+ if (requested_value == nullptr) {
+ return null_data();
+ } else {
+ address buffer_addr = requested_value - _buffer_to_requested_delta;
+ return capture(buffer_addr);
+ }
+ }
+
+ GrowableArrayCHeap* roots() override {
+ return new GrowableArrayCHeap();
+ }
+};
+#endif // INCLUDE_CDS_JAVA_HEAP
+
+#endif // SHARE_CDS_AOTMAPPEDHEAP_HPP
diff --git a/src/hotspot/share/cds/aotMappedHeapLoader.cpp b/src/hotspot/share/cds/aotMappedHeapLoader.cpp
index 84051cbd9e5..7a201d8297f 100644
--- a/src/hotspot/share/cds/aotMappedHeapLoader.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapLoader.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotLogging.hpp"
+#include "cds/aotMappedHeap.hpp"
#include "cds/aotMappedHeapLoader.inline.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
@@ -221,7 +222,7 @@ void AOTMappedHeapLoader::patch_embedded_pointers(FileMapInfo* info,
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
-struct LoadedArchiveHeapRegion {
+struct AOTMappedHeapRegion {
int _region_index; // index for FileMapInfo::space_at(index)
size_t _region_size; // number of bytes in this region
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
@@ -232,7 +233,7 @@ struct LoadedArchiveHeapRegion {
}
};
-void AOTMappedHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
+void AOTMappedHeapLoader::init_loaded_heap_relocation(AOTMappedHeapRegion* loaded_region) {
_dumptime_base = loaded_region->_dumptime_base;
_dumptime_top = loaded_region->top();
_runtime_offset = loaded_region->_runtime_offset;
@@ -249,7 +250,7 @@ class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
uintptr_t _top;
public:
- PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
+ PatchLoadedRegionPointers(narrowOop* start, AOTMappedHeapRegion* loaded_region)
: _start(start),
_offset(loaded_region->_runtime_offset),
_base(loaded_region->_dumptime_base),
@@ -270,7 +271,7 @@ class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
}
};
-bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
+bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region,
MemRegion& archive_space) {
size_t total_bytes = 0;
FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
@@ -301,7 +302,7 @@ bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchive
return true;
}
-bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
+bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region,
uintptr_t load_address) {
uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
if (bitmap_base == 0) {
@@ -340,7 +341,7 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
assert(can_load(), "loaded heap for must be supported");
init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
- LoadedArchiveHeapRegion loaded_region;
+ AOTMappedHeapRegion loaded_region;
memset(&loaded_region, 0, sizeof(loaded_region));
MemRegion archive_space;
@@ -360,10 +361,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
- if (CDSConfig::is_dumping_heap()) {
- assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- } else {
- assert(CDSConfig::is_using_archive(), "must be");
+ if (!CDSConfig::is_using_archive()) {
+ assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@@ -466,7 +465,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
- StringTable::load_shared_strings_array();
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ StringTable::move_shared_strings_into_runtime_table();
+ }
}
}
@@ -619,7 +620,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
- HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
+ HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;
@@ -733,40 +734,22 @@ void AOTMappedHeapLoader::dealloc_heap_region(FileMapInfo* info) {
}
AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
- class MappedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
- private:
- address _current;
- address _next;
-
- address _buffer_start;
- address _buffer_end;
- uint64_t _buffer_start_narrow_oop;
- intptr_t _buffer_to_requested_delta;
- int _requested_shift;
-
- size_t _num_root_segments;
- size_t _num_obj_arrays_logged;
-
+ class MappedLoaderOopIterator : public AOTMappedHeapOopIterator {
public:
MappedLoaderOopIterator(address buffer_start,
address buffer_end,
- uint64_t buffer_start_narrow_oop,
- intptr_t buffer_to_requested_delta,
+ address requested_base,
+ address requested_start,
int requested_shift,
- size_t num_root_segments)
- : _current(nullptr),
- _next(buffer_start),
- _buffer_start(buffer_start),
- _buffer_end(buffer_end),
- _buffer_start_narrow_oop(buffer_start_narrow_oop),
- _buffer_to_requested_delta(buffer_to_requested_delta),
- _requested_shift(requested_shift),
- _num_root_segments(num_root_segments),
- _num_obj_arrays_logged(0) {
- }
+ size_t num_root_segments) :
+ AOTMappedHeapOopIterator(buffer_start,
+ buffer_end,
+ requested_base,
+ requested_start,
+ requested_shift,
+ num_root_segments) {}
-
- AOTMapLogger::OopData capture(address buffered_addr) {
+ AOTMapLogger::OopData capture(address buffered_addr) override {
oopDesc* raw_oop = (oopDesc*)buffered_addr;
size_t size = raw_oop->size();
address requested_addr = buffered_addr + _buffer_to_requested_delta;
@@ -784,62 +767,17 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* in
size,
false };
}
-
- bool has_next() override {
- return _next < _buffer_end;
- }
-
- AOTMapLogger::OopData next() override {
- _current = _next;
- AOTMapLogger::OopData result = capture(_current);
- if (result._klass->is_objArray_klass()) {
- result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
- }
- _next = _current + result._size * BytesPerWord;
- return result;
- }
-
- AOTMapLogger::OopData obj_at(narrowOop* addr) override {
- uint64_t n = (uint64_t)(*addr);
- if (n == 0) {
- return null_data();
- } else {
- precond(n >= _buffer_start_narrow_oop);
- address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
- return capture(buffer_addr);
- }
- }
-
- AOTMapLogger::OopData obj_at(oop* addr) override {
- address requested_value = cast_from_oop(*addr);
- if (requested_value == nullptr) {
- return null_data();
- } else {
- address buffer_addr = requested_value - _buffer_to_requested_delta;
- return capture(buffer_addr);
- }
- }
-
- GrowableArrayCHeap* roots() override {
- return new GrowableArrayCHeap();
- }
};
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
address requested_base = UseCompressedOops ? (address)info->narrow_oop_base() : heap_region_requested_address(info);
address requested_start = requested_base + r->mapping_offset();
int requested_shift = info->narrow_oop_shift();
- intptr_t buffer_to_requested_delta = requested_start - buffer_start;
- uint64_t buffer_start_narrow_oop = 0xdeadbeed;
- if (UseCompressedOops) {
- buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
- assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
- }
return new MappedLoaderOopIterator(buffer_start,
buffer_end,
- buffer_start_narrow_oop,
- buffer_to_requested_delta,
+ requested_base,
+ requested_start,
requested_shift,
info->mapped_heap()->root_segments().count());
}
diff --git a/src/hotspot/share/cds/aotMappedHeapLoader.hpp b/src/hotspot/share/cds/aotMappedHeapLoader.hpp
index d344d7b0b0a..7c5ca1b1f9e 100644
--- a/src/hotspot/share/cds/aotMappedHeapLoader.hpp
+++ b/src/hotspot/share/cds/aotMappedHeapLoader.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,8 +37,8 @@
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
+struct AOTMappedHeapRegion;
class FileMapInfo;
-struct LoadedArchiveHeapRegion;
class AOTMappedHeapLoader : AllStatic {
friend class AOTMapLogger;
@@ -93,7 +93,7 @@ public:
// function instead.
inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
- // More efficient version, but works only when ArchiveHeap is mapped.
+ // More efficient version, but works only when is_mapped() returns true.
inline static oop decode_from_mapped_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void patch_compressed_embedded_pointers(BitMapView bm,
@@ -113,7 +113,7 @@ private:
static bool _is_loaded;
// Support for loaded archived heap. These are cached values from
- // LoadedArchiveHeapRegion's.
+ // AOTMappedHeapRegion's.
static uintptr_t _dumptime_base;
static uintptr_t _dumptime_top;
static intx _runtime_offset;
@@ -141,10 +141,10 @@ private:
static bool _heap_pointers_need_patching;
static void init_narrow_oop_decoding(address base, int shift);
- static bool init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
+ static bool init_loaded_region(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region,
MemRegion& archive_space);
- static bool load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, uintptr_t buffer);
- static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info);
+ static bool load_heap_region_impl(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region, uintptr_t buffer);
+ static void init_loaded_heap_relocation(AOTMappedHeapRegion* reloc_info);
static void patch_native_pointers();
static void finish_loaded_heap();
static void verify_loaded_heap();
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.cpp b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
index e73b980614a..64c0e3c40e8 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
@@ -22,7 +22,7 @@
*
*/
-#include "cds/aotMappedHeapLoader.hpp"
+#include "cds/aotMappedHeap.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/cdsConfig.hpp"
@@ -151,7 +151,7 @@ void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
}
void AOTMappedHeapWriter::write(GrowableArrayCHeap* roots,
- ArchiveMappedHeapInfo* heap_info) {
+ AOTMappedHeapInfo* heap_info) {
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
@@ -598,7 +598,7 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
//
// So we just hard code it to NOCOOPS_REQUESTED_BASE.
//
-void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
+void AOTMappedHeapWriter::set_requested_address_range(AOTMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
@@ -792,7 +792,7 @@ static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bit
// Update all oop fields embedded in the buffered objects
void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap* roots,
- ArchiveMappedHeapInfo* heap_info) {
+ AOTMappedHeapInfo* heap_info) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
@@ -862,7 +862,7 @@ void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
});
}
-void AOTMappedHeapWriter::compute_ptrmap(ArchiveMappedHeapInfo* heap_info) {
+void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) {
int num_non_null_ptrs = 0;
Metadata** bottom = (Metadata**) _requested_bottom;
Metadata** top = (Metadata**) _requested_top; // exclusive
@@ -909,40 +909,23 @@ void AOTMappedHeapWriter::compute_ptrmap(ArchiveMappedHeapInfo* heap_info) {
num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}
-AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHeapInfo* heap_info) {
- class MappedWriterOopIterator : public AOTMapLogger::OopDataIterator {
- private:
- address _current;
- address _next;
-
- address _buffer_start;
- address _buffer_end;
- uint64_t _buffer_start_narrow_oop;
- intptr_t _buffer_to_requested_delta;
- int _requested_shift;
-
- size_t _num_root_segments;
- size_t _num_obj_arrays_logged;
-
+AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(AOTMappedHeapInfo* heap_info) {
+ class MappedWriterOopIterator : public AOTMappedHeapOopIterator {
public:
MappedWriterOopIterator(address buffer_start,
address buffer_end,
- uint64_t buffer_start_narrow_oop,
- intptr_t buffer_to_requested_delta,
+ address requested_base,
+ address requested_start,
int requested_shift,
- size_t num_root_segments)
- : _current(nullptr),
- _next(buffer_start),
- _buffer_start(buffer_start),
- _buffer_end(buffer_end),
- _buffer_start_narrow_oop(buffer_start_narrow_oop),
- _buffer_to_requested_delta(buffer_to_requested_delta),
- _requested_shift(requested_shift),
- _num_root_segments(num_root_segments),
- _num_obj_arrays_logged(0) {
- }
+ size_t num_root_segments) :
+ AOTMappedHeapOopIterator(buffer_start,
+ buffer_end,
+ requested_base,
+ requested_start,
+ requested_shift,
+ num_root_segments) {}
- AOTMapLogger::OopData capture(address buffered_addr) {
+ AOTMapLogger::OopData capture(address buffered_addr) override {
oopDesc* raw_oop = (oopDesc*)buffered_addr;
size_t size = size_of_buffered_oop(buffered_addr);
address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
@@ -960,45 +943,6 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
size,
false };
}
-
- bool has_next() override {
- return _next < _buffer_end;
- }
-
- AOTMapLogger::OopData next() override {
- _current = _next;
- AOTMapLogger::OopData result = capture(_current);
- if (result._klass->is_objArray_klass()) {
- result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
- }
- _next = _current + result._size * BytesPerWord;
- return result;
- }
-
- AOTMapLogger::OopData obj_at(narrowOop* addr) override {
- uint64_t n = (uint64_t)(*addr);
- if (n == 0) {
- return null_data();
- } else {
- precond(n >= _buffer_start_narrow_oop);
- address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
- return capture(buffer_addr);
- }
- }
-
- AOTMapLogger::OopData obj_at(oop* addr) override {
- address requested_value = cast_from_oop(*addr);
- if (requested_value == nullptr) {
- return null_data();
- } else {
- address buffer_addr = requested_value - _buffer_to_requested_delta;
- return capture(buffer_addr);
- }
- }
-
- GrowableArrayCHeap* roots() override {
- return new GrowableArrayCHeap();
- }
};
MemRegion r = heap_info->buffer_region();
@@ -1008,17 +952,11 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
- intptr_t buffer_to_requested_delta = requested_start - buffer_start;
- uint64_t buffer_start_narrow_oop = 0xdeadbeed;
- if (UseCompressedOops) {
- buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
- assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
- }
return new MappedWriterOopIterator(buffer_start,
buffer_end,
- buffer_start_narrow_oop,
- buffer_to_requested_delta,
+ requested_base,
+ requested_start,
requested_shift,
heap_info->root_segments().count());
}
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.hpp b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
index eafd38ac8bb..7481e7922a0 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.hpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -196,10 +196,10 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
- static void set_requested_address_range(ArchiveMappedHeapInfo* info);
+ static void set_requested_address_range(AOTMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
- static void relocate_embedded_oops(GrowableArrayCHeap* roots, ArchiveMappedHeapInfo* info);
- static void compute_ptrmap(ArchiveMappedHeapInfo *info);
+ static void relocate_embedded_oops(GrowableArrayCHeap* roots, AOTMappedHeapInfo* info);
+ static void compute_ptrmap(AOTMappedHeapInfo *info);
static bool is_in_requested_range(oop o);
static oop requested_obj_from_buffer_offset(size_t offset);
@@ -229,7 +229,7 @@ public:
static bool is_string_too_large_to_archive(oop string);
static bool is_dumped_interned_string(oop o);
static void add_to_dumped_interned_strings(oop string);
- static void write(GrowableArrayCHeap*, ArchiveMappedHeapInfo* heap_info);
+ static void write(GrowableArrayCHeap*, AOTMappedHeapInfo* heap_info);
static address requested_address(); // requested address of the lowest achived heap object
static size_t get_filler_size_at(address buffered_addr);
@@ -240,7 +240,7 @@ public:
static Klass* real_klass_of_buffered_oop(address buffered_addr);
static size_t size_of_buffered_oop(address buffered_addr);
- static AOTMapLogger::OopDataIterator* oop_iterator(ArchiveMappedHeapInfo* heap_info);
+ static AOTMapLogger::OopDataIterator* oop_iterator(AOTMappedHeapInfo* heap_info);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_AOTMAPPEDHEAPWRITER_HPP
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 79d789e0c70..b75d7628aa9 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -661,8 +661,8 @@ void AOTMetaspace::rewrite_bytecodes_and_calculate_fingerprints(Thread* thread,
class VM_PopulateDumpSharedSpace : public VM_Operation {
private:
- ArchiveMappedHeapInfo _mapped_heap_info;
- ArchiveStreamedHeapInfo _streamed_heap_info;
+ AOTMappedHeapInfo _mapped_heap_info;
+ AOTStreamedHeapInfo _streamed_heap_info;
FileMapInfo* _map_info;
StaticArchiveBuilder& _builder;
@@ -682,8 +682,8 @@ public:
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
- ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
- ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
+ AOTMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
+ AOTStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
@@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::iterate_roots(it);
+ }
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::remove_unshareable_info();
+ }
+
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@@ -1097,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
- assert(CDSConfig::allow_only_single_java_thread(), "Required");
+ if (!CDSConfig::is_dumping_preimage_static_archive()) {
+ // A single thread is required for Reference handling and deterministic CDS archive.
+ // It's not required for dumping preimage, where References won't be archived and
+ // determinism is not needed.
+ assert(CDSConfig::allow_only_single_java_thread(), "Required");
+ }
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@@ -1135,7 +1147,8 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
- HeapShared::reset_archived_object_states(CHECK);
+ ClassLoaderDataShared::build_tables(CHECK);
+ HeapShared::prepare_for_archiving(CHECK);
}
AOTReferenceObjSupport::initialize(CHECK);
@@ -1154,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
-
- if (HeapShared::is_writing_mapping_mode()) {
- // Do this at the very end, when no Java code will be executed. Otherwise
- // some new strings may be added to the intern table.
- StringTable::allocate_shared_strings_array(CHECK);
- }
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();
@@ -1205,8 +1212,8 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info) {
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info) {
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
@@ -2099,7 +2106,7 @@ MapArchiveResult AOTMetaspace::map_archive(FileMapInfo* mapinfo, char* mapped_ba
// Currently, only static archive uses early serialized data.
char* buffer = mapinfo->early_serialized_data();
intptr_t* array = (intptr_t*)buffer;
- ReadClosure rc(&array, (intptr_t)mapped_base_address);
+ ReadClosure rc(&array, (address)mapped_base_address);
early_serialize(&rc);
}
@@ -2145,7 +2152,7 @@ void AOTMetaspace::initialize_shared_spaces() {
// shared string/symbol tables.
char* buffer = static_mapinfo->serialized_data();
intptr_t* array = (intptr_t*)buffer;
- ReadClosure rc(&array, (intptr_t)SharedBaseAddress);
+ ReadClosure rc(&array, (address)SharedBaseAddress);
serialize(&rc);
// Finish initializing the heap dump mode used in the archive
@@ -2157,7 +2164,7 @@ void AOTMetaspace::initialize_shared_spaces() {
if (dynamic_mapinfo != nullptr) {
intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
- ReadClosure rc(&buffer, (intptr_t)SharedBaseAddress);
+ ReadClosure rc(&buffer, (address)SharedBaseAddress);
DynamicArchive::serialize(&rc);
}
diff --git a/src/hotspot/share/cds/aotMetaspace.hpp b/src/hotspot/share/cds/aotMetaspace.hpp
index ab78787288f..4607a936abe 100644
--- a/src/hotspot/share/cds/aotMetaspace.hpp
+++ b/src/hotspot/share/cds/aotMetaspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,8 @@
#include "utilities/macros.hpp"
class ArchiveBuilder;
-class ArchiveMappedHeapInfo;
-class ArchiveStreamedHeapInfo;
+class AOTMappedHeapInfo;
+class AOTStreamedHeapInfo;
class FileMapInfo;
class Method;
class outputStream;
@@ -192,8 +192,8 @@ private:
static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info);
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info);
static FileMapInfo* open_static_archive();
static FileMapInfo* open_dynamic_archive();
// use_requested_addr: If true (default), attempt to map at the address the
diff --git a/src/hotspot/share/cds/aotReferenceObjSupport.cpp b/src/hotspot/share/cds/aotReferenceObjSupport.cpp
index aa7cc875533..0c27c8ce5f0 100644
--- a/src/hotspot/share/cds/aotReferenceObjSupport.cpp
+++ b/src/hotspot/share/cds/aotReferenceObjSupport.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
- // We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
- // is not running. Otherwise the checks for next/discovered may not work.
- precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
+ // The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
+ // is not running.
+ //
+ // This code is called on every object found by AOTArtifactFinder. When dumping the
+ // preimage archive, AOTArtifactFinder should not find any Reference objects.
+ precond(!CDSConfig::is_dumping_preimage_static_archive());
+ precond(CDSConfig::allow_only_single_java_thread());
+
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);
diff --git a/src/hotspot/share/cds/aotStreamedHeap.cpp b/src/hotspot/share/cds/aotStreamedHeap.cpp
new file mode 100644
index 00000000000..3378924bf32
--- /dev/null
+++ b/src/hotspot/share/cds/aotStreamedHeap.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotStreamedHeap.hpp"
+
+// Anything that goes in the header must be thoroughly purged from uninitialized memory
+// as it will be written to disk. Therefore, the constructors memset the memory to 0.
+// This is not the prettiest thing, but we need to know every byte is initialized,
+// including potential padding between fields.
+
+AOTStreamedHeapHeader::AOTStreamedHeapHeader(size_t forwarding_offset,
+ size_t roots_offset,
+ size_t num_roots,
+ size_t root_highest_object_index_table_offset,
+ size_t num_archived_objects) {
+ memset((char*)this, 0, sizeof(*this));
+ _forwarding_offset = forwarding_offset;
+ _roots_offset = roots_offset;
+ _num_roots = num_roots;
+ _root_highest_object_index_table_offset = root_highest_object_index_table_offset;
+ _num_archived_objects = num_archived_objects;
+}
+
+AOTStreamedHeapHeader::AOTStreamedHeapHeader() {
+ memset((char*)this, 0, sizeof(*this));
+}
+
+AOTStreamedHeapHeader AOTStreamedHeapInfo::create_header() {
+ return AOTStreamedHeapHeader{_forwarding_offset,
+ _roots_offset,
+ _num_roots,
+ _root_highest_object_index_table_offset,
+ _num_archived_objects};
+}
diff --git a/src/hotspot/share/cds/aotStreamedHeap.hpp b/src/hotspot/share/cds/aotStreamedHeap.hpp
new file mode 100644
index 00000000000..f06b1bcb4c6
--- /dev/null
+++ b/src/hotspot/share/cds/aotStreamedHeap.hpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTSTREAMEDHEAP_HPP
+#define SHARE_CDS_AOTSTREAMEDHEAP_HPP
+
+#include "cds/aotMapLogger.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
+
+class AOTStreamedHeapHeader {
+ size_t _forwarding_offset; // Offset of forwarding information in the heap region.
+ size_t _roots_offset; // Start position for the roots
+ size_t _root_highest_object_index_table_offset; // Offset of root dfs depth information
+ size_t _num_roots; // Number of embedded roots
+ size_t _num_archived_objects; // The number of archived heap objects
+
+public:
+ AOTStreamedHeapHeader();
+ AOTStreamedHeapHeader(size_t forwarding_offset,
+ size_t roots_offset,
+ size_t num_roots,
+ size_t root_highest_object_index_table_offset,
+ size_t num_archived_objects);
+
+ size_t forwarding_offset() const { return _forwarding_offset; }
+ size_t roots_offset() const { return _roots_offset; }
+ size_t num_roots() const { return _num_roots; }
+ size_t root_highest_object_index_table_offset() const { return _root_highest_object_index_table_offset; }
+ size_t num_archived_objects() const { return _num_archived_objects; }
+
+ // This class is trivially copyable and assignable.
+ AOTStreamedHeapHeader(const AOTStreamedHeapHeader&) = default;
+ AOTStreamedHeapHeader& operator=(const AOTStreamedHeapHeader&) = default;
+};
+
+class AOTStreamedHeapInfo {
+ MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
+ CHeapBitMap _oopmap;
+ size_t _roots_offset; // Offset of the HeapShared::roots() object, from the bottom
+ // of the archived heap objects, in bytes.
+ size_t _num_roots;
+
+ size_t _forwarding_offset; // Offset of forwarding information from the bottom
+ size_t _root_highest_object_index_table_offset; // Offset to root dfs depth information
+ size_t _num_archived_objects; // The number of archived objects written into the CDS archive.
+
+public:
+ AOTStreamedHeapInfo()
+ : _buffer_region(),
+ _oopmap(128, mtClassShared),
+ _roots_offset(), _num_roots(),
+ _forwarding_offset(),
+ _root_highest_object_index_table_offset(),
+ _num_archived_objects() {}
+
+ bool is_used() { return !_buffer_region.is_empty(); }
+
+ void set_buffer_region(MemRegion r) { _buffer_region = r; }
+ MemRegion buffer_region() { return _buffer_region; }
+ char* buffer_start() { return (char*)_buffer_region.start(); }
+ size_t buffer_byte_size() { return _buffer_region.byte_size(); }
+
+ CHeapBitMap* oopmap() { return &_oopmap; }
+ void set_roots_offset(size_t n) { _roots_offset = n; }
+ size_t roots_offset() { return _roots_offset; }
+ void set_num_roots(size_t n) { _num_roots = n; }
+ size_t num_roots() { return _num_roots; }
+ void set_forwarding_offset(size_t n) { _forwarding_offset = n; }
+ void set_root_highest_object_index_table_offset(size_t n) { _root_highest_object_index_table_offset = n; }
+ void set_num_archived_objects(size_t n) { _num_archived_objects = n; }
+ size_t num_archived_objects() { return _num_archived_objects; }
+
+ AOTStreamedHeapHeader create_header();
+};
+
+#if INCLUDE_CDS_JAVA_HEAP
+class AOTStreamedHeapOopIterator : public AOTMapLogger::OopDataIterator {
+protected:
+ int _current;
+ int _next;
+ address _buffer_start;
+ int _num_archived_objects;
+
+public:
+ AOTStreamedHeapOopIterator(address buffer_start,
+ int num_archived_objects)
+ : _current(0),
+ _next(1),
+ _buffer_start(buffer_start),
+ _num_archived_objects(num_archived_objects) {}
+
+ virtual AOTMapLogger::OopData capture(int dfs_index) = 0;
+
+ bool has_next() override {
+ return _next <= _num_archived_objects;
+ }
+
+ AOTMapLogger::OopData next() override {
+ _current = _next;
+ AOTMapLogger::OopData result = capture(_current);
+ _next = _current + 1;
+ return result;
+ }
+
+ AOTMapLogger::OopData obj_at(narrowOop* addr) override {
+ int dfs_index = (int)(*addr);
+ if (dfs_index == 0) {
+ return null_data();
+ } else {
+ return capture(dfs_index);
+ }
+ }
+
+ AOTMapLogger::OopData obj_at(oop* addr) override {
+ int dfs_index = (int)cast_from_oop(*addr);
+ if (dfs_index == 0) {
+ return null_data();
+ } else {
+ return capture(dfs_index);
+ }
+ }
+};
+#endif // INCLUDE_CDS_JAVA_HEAP
+
+#endif // SHARE_CDS_AOTSTREAMEDHEAP_HPP
diff --git a/src/hotspot/share/cds/aotStreamedHeapLoader.cpp b/src/hotspot/share/cds/aotStreamedHeapLoader.cpp
index 6719f9bf898..39f735543cd 100644
--- a/src/hotspot/share/cds/aotStreamedHeapLoader.cpp
+++ b/src/hotspot/share/cds/aotStreamedHeapLoader.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1102,25 +1102,13 @@ void AOTStreamedHeapLoader::finish_initialization(FileMapInfo* static_mapinfo) {
}
AOTMapLogger::OopDataIterator* AOTStreamedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
- class StreamedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
- private:
- int _current;
- int _next;
-
- address _buffer_start;
-
- int _num_archived_objects;
-
+ class StreamedLoaderOopIterator : public AOTStreamedHeapOopIterator {
public:
StreamedLoaderOopIterator(address buffer_start,
int num_archived_objects)
- : _current(0),
- _next(1),
- _buffer_start(buffer_start),
- _num_archived_objects(num_archived_objects) {
- }
+ : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects) {}
- AOTMapLogger::OopData capture(int dfs_index) {
+ AOTMapLogger::OopData capture(int dfs_index) override {
size_t buffered_offset = buffer_offset_for_object_index(dfs_index);
address buffered_addr = _buffer_start + buffered_offset;
oopDesc* raw_oop = (oopDesc*)buffered_addr;
@@ -1142,35 +1130,6 @@ AOTMapLogger::OopDataIterator* AOTStreamedHeapLoader::oop_iterator(FileMapInfo*
false };
}
- bool has_next() override {
- return _next <= _num_archived_objects;
- }
-
- AOTMapLogger::OopData next() override {
- _current = _next;
- AOTMapLogger::OopData result = capture(_current);
- _next = _current + 1;
- return result;
- }
-
- AOTMapLogger::OopData obj_at(narrowOop* addr) override {
- int dfs_index = (int)(*addr);
- if (dfs_index == 0) {
- return null_data();
- } else {
- return capture(dfs_index);
- }
- }
-
- AOTMapLogger::OopData obj_at(oop* addr) override {
- int dfs_index = (int)cast_from_oop(*addr);
- if (dfs_index == 0) {
- return null_data();
- } else {
- return capture(dfs_index);
- }
- }
-
GrowableArrayCHeap* roots() override {
GrowableArrayCHeap* result = new GrowableArrayCHeap();
diff --git a/src/hotspot/share/cds/aotStreamedHeapWriter.cpp b/src/hotspot/share/cds/aotStreamedHeapWriter.cpp
index 16acebc7d8d..f52532b2f2a 100644
--- a/src/hotspot/share/cds/aotStreamedHeapWriter.cpp
+++ b/src/hotspot/share/cds/aotStreamedHeapWriter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,7 +163,7 @@ void AOTStreamedHeapWriter::order_source_objs(GrowableArrayCHeap* roots,
- ArchiveStreamedHeapInfo* heap_info) {
+ AOTStreamedHeapInfo* heap_info) {
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
order_source_objs(roots);
@@ -453,7 +453,7 @@ static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bit
}
// Update all oop fields embedded in the buffered objects
-void AOTStreamedHeapWriter::map_embedded_oops(ArchiveStreamedHeapInfo* heap_info) {
+void AOTStreamedHeapWriter::map_embedded_oops(AOTStreamedHeapInfo* heap_info) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
@@ -497,7 +497,7 @@ oop AOTStreamedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
return buffered_offset_to_source_obj(buffered_address_to_offset(buffered_addr));
}
-void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo* info) {
+void AOTStreamedHeapWriter::populate_archive_heap_info(AOTStreamedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
@@ -512,15 +512,9 @@ void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo*
info->set_num_archived_objects((size_t)_source_objs->length());
}
-AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStreamedHeapInfo* heap_info) {
- class StreamedWriterOopIterator : public AOTMapLogger::OopDataIterator {
+AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(AOTStreamedHeapInfo* heap_info) {
+ class StreamedWriterOopIterator : public AOTStreamedHeapOopIterator {
private:
- int _current;
- int _next;
-
- address _buffer_start;
-
- int _num_archived_objects;
int _num_archived_roots;
int* _roots;
@@ -529,15 +523,11 @@ AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStream
int num_archived_objects,
int num_archived_roots,
int* roots)
- : _current(0),
- _next(1),
- _buffer_start(buffer_start),
- _num_archived_objects(num_archived_objects),
+ : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects),
_num_archived_roots(num_archived_roots),
- _roots(roots) {
- }
+ _roots(roots) {}
- AOTMapLogger::OopData capture(int dfs_index) {
+ AOTMapLogger::OopData capture(int dfs_index) override {
size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
address buffered_addr = _buffer_start + buffered_offset;
oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
@@ -561,35 +551,6 @@ AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStream
false };
}
- bool has_next() override {
- return _next <= _num_archived_objects;
- }
-
- AOTMapLogger::OopData next() override {
- _current = _next;
- AOTMapLogger::OopData result = capture(_current);
- _next = _current + 1;
- return result;
- }
-
- AOTMapLogger::OopData obj_at(narrowOop* addr) override {
- int dfs_index = (int)(*addr);
- if (dfs_index == 0) {
- return null_data();
- } else {
- return capture(dfs_index);
- }
- }
-
- AOTMapLogger::OopData obj_at(oop* addr) override {
- int dfs_index = (int)cast_from_oop(*addr);
- if (dfs_index == 0) {
- return null_data();
- } else {
- return capture(dfs_index);
- }
- }
-
GrowableArrayCHeap* roots() override {
GrowableArrayCHeap* result = new GrowableArrayCHeap();
diff --git a/src/hotspot/share/cds/aotStreamedHeapWriter.hpp b/src/hotspot/share/cds/aotStreamedHeapWriter.hpp
index bde82f8ce29..ab5aec0327b 100644
--- a/src/hotspot/share/cds/aotStreamedHeapWriter.hpp
+++ b/src/hotspot/share/cds/aotStreamedHeapWriter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@ class AOTStreamedHeapWriter : AllStatic {
static void copy_forwarding_to_buffer();
static void copy_roots_max_dfs_to_buffer(int roots_length);
- static void map_embedded_oops(ArchiveStreamedHeapInfo* info);
+ static void map_embedded_oops(AOTStreamedHeapInfo* info);
static bool is_in_requested_range(oop o);
static oop requested_obj_from_buffer_offset(size_t offset);
@@ -131,14 +131,14 @@ class AOTStreamedHeapWriter : AllStatic {
static void update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass);
- static void populate_archive_heap_info(ArchiveStreamedHeapInfo* info);
+ static void populate_archive_heap_info(AOTStreamedHeapInfo* info);
public:
static void init() NOT_CDS_JAVA_HEAP_RETURN;
static void delete_tables_with_raw_oops();
static void add_source_obj(oop src_obj);
- static void write(GrowableArrayCHeap*, ArchiveStreamedHeapInfo* heap_info);
+ static void write(GrowableArrayCHeap*, AOTStreamedHeapInfo* heap_info);
static address buffered_heap_roots_addr() {
return offset_to_buffered_address(_roots_offset);
}
@@ -156,7 +156,7 @@ public:
static oop buffered_offset_to_source_obj(size_t buffered_offset);
static oop buffered_addr_to_source_obj(address buffered_addr);
- static AOTMapLogger::OopDataIterator* oop_iterator(ArchiveStreamedHeapInfo* heap_info);
+ static AOTMapLogger::OopDataIterator* oop_iterator(AOTStreamedHeapInfo* heap_info);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_AOTSTREAMEDHEAPWRITER_HPP
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 6bbefea5cd9..0ea5d6c6ecb 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -24,6 +24,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/aotMetaspace.hpp"
@@ -175,10 +176,10 @@ ArchiveBuilder::ArchiveBuilder() :
_mapped_static_archive_bottom(nullptr),
_mapped_static_archive_top(nullptr),
_buffer_to_requested_delta(0),
- _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
- _rw_region("rw", MAX_SHARED_DELTA),
- _ro_region("ro", MAX_SHARED_DELTA),
- _ac_region("ac", MAX_SHARED_DELTA),
+ _pz_region("pz"), // protection zone -- used only during dumping; does NOT exist in cds archive.
+ _rw_region("rw"),
+ _ro_region("ro"),
+ _ac_region("ac"),
_ptrmap(mtClassShared),
_rw_ptrmap(mtClassShared),
_ro_ptrmap(mtClassShared),
@@ -243,7 +244,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@@ -252,7 +253,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
- } else if (ref->msotype() == MetaspaceObj::SymbolType) {
+ } else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@@ -271,11 +272,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- ClassLoaderDataShared::iterate_symbols(&doit);
- }
-#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@@ -325,8 +321,10 @@ void ArchiveBuilder::sort_klasses() {
}
address ArchiveBuilder::reserve_buffer() {
- // AOTCodeCache::max_aot_code_size() accounts for aot code region.
- size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M) + AOTCodeCache::max_aot_code_size();
+ // On 64-bit: reserve address space for archives up to the max encoded offset limit.
+ // On 32-bit: use 256MB + AOT code size due to limited virtual address space.
+ size_t buffer_size = LP64_ONLY(AOTCompressedPointers::MaxMetadataOffsetBytes)
+ NOT_LP64(256 * M + AOTCodeCache::max_aot_code_size());
ReservedSpace rs = MemoryReserver::reserve(buffer_size,
AOTMetaspace::core_region_alignment(),
os::vm_page_size(),
@@ -446,14 +444,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
- if (ref->msotype() == MetaspaceObj::MethodType) {
+ if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
- if (ref->msotype() == MetaspaceObj::MethodDataType) {
+ if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@@ -554,16 +552,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
- } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
- ref->msotype() == MetaspaceObj::MethodCountersType ||
- ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
- ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
- ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
+ } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
+ ref->type() == MetaspaceClosureType::MethodCountersType ||
+ ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
+ ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
+ ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
- } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
+ } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@@ -571,7 +569,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
- log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
+ aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
+ return set_to_null;
+ }
+ if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
+ ResourceMark rm;
+ aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}
@@ -615,15 +618,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
- char* start = rw_region()->top();
- ClassLoaderDataShared::allocate_archived_tables();
- alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
- }
-#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@@ -632,15 +626,6 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- char* start = ro_region()->top();
- ClassLoaderDataShared::init_archived_tables();
- alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
- }
-#endif
-
RegeneratedClasses::record_regenerated_objects();
}
@@ -658,7 +643,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
- if (src_info->msotype() == MetaspaceObj::ClassType) {
+ if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@@ -674,7 +659,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
- } else if (src_info->msotype() == MetaspaceObj::SymbolType) {
+ } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@@ -684,7 +669,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
- if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
+ if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@@ -699,7 +684,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
- intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
+ intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@@ -709,7 +694,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
- _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
+ _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@@ -992,15 +977,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
- if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
- info.msotype() == MetaspaceObj::MethodTrainingDataType ||
- info.msotype() == MetaspaceObj::CompileTrainingDataType) {
+ if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
+ info.type() == MetaspaceClosureType::MethodTrainingDataType ||
+ info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodDataType) {
+ } else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
+ } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}
@@ -1008,16 +993,15 @@ void ArchiveBuilder::make_training_data_shareable() {
_src_obj_table.iterate_all(clean_td);
}
-uintx ArchiveBuilder::buffer_to_offset(address p) const {
+size_t ArchiveBuilder::buffer_to_offset(address p) const {
address requested_p = to_requested(p);
- assert(requested_p >= _requested_static_archive_bottom, "must be");
- return requested_p - _requested_static_archive_bottom;
+ return pointer_delta(requested_p, _requested_static_archive_bottom, 1);
}
-uintx ArchiveBuilder::any_to_offset(address p) const {
+size_t ArchiveBuilder::any_to_offset(address p) const {
if (is_in_mapped_static_archive(p)) {
assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
- return p - _mapped_static_archive_bottom;
+ return pointer_delta(p, _mapped_static_archive_bottom, 1);
}
if (!is_in_buffer_space(p)) {
// p must be a "source" address
@@ -1026,7 +1010,7 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
return buffer_to_offset(p);
}
-address ArchiveBuilder::offset_to_buffered_address(u4 offset) const {
+address ArchiveBuilder::offset_to_buffered_address(size_t offset) const {
address requested_addr = _requested_static_archive_bottom + offset;
address buffered_addr = requested_addr - _buffer_to_requested_delta;
assert(is_in_buffer_space(buffered_addr), "bad offset");
@@ -1172,7 +1156,7 @@ void ArchiveBuilder::print_stats() {
_alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}
-void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
+void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
// Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
// AOTMetaspace::n_regions (internal to hotspot).
assert(NUM_CDS_REGIONS == AOTMetaspace::n_regions, "sanity");
@@ -1231,8 +1215,8 @@ void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
}
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info) {
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info) {
// Print statistics of all the regions
const size_t bitmap_used = mapinfo->region_at(AOTMetaspace::bm)->used();
const size_t bitmap_reserved = mapinfo->region_at(AOTMetaspace::bm)->used_aligned();
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index 9a628439039..b3667ea11b4 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,8 @@
#include "utilities/hashTable.hpp"
#include "utilities/resizableHashTable.hpp"
-class ArchiveMappedHeapInfo;
-class ArchiveStreamedHeapInfo;
+class AOTMappedHeapInfo;
+class AOTStreamedHeapInfo;
class CHeapBitMap;
class FileMapInfo;
class Klass;
@@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
- MetaspaceObj::Type _msotype;
+ MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object insider the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
- _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
+ _size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* renegerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(renegerated_obj_info->_follow_mode),
- _size_in_bytes(0), _msotype(renegerated_obj_info->_msotype),
+ _size_in_bytes(0), _type(renegerated_obj_info->_type),
_source_addr(src), _buffered_addr(renegerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
- MetaspaceObj::Type msotype() const { return _msotype; }
+ MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};
@@ -247,8 +247,8 @@ private:
} _relocated_ptr_info;
void print_region_stats(FileMapInfo *map_info,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info);
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info);
void print_bitmap_region_stats(size_t size, size_t total_size);
void print_heap_region_stats(char* start, size_t size, size_t total_size);
@@ -329,49 +329,22 @@ public:
return current()->buffer_to_requested_delta();
}
- inline static u4 to_offset_u4(uintx offset) {
- guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset " INTPTR_FORMAT, offset);
- return (u4)offset;
- }
-
public:
- static const uintx MAX_SHARED_DELTA = ArchiveUtils::MAX_SHARED_DELTA;;
-
// The address p points to an object inside the output buffer. When the archive is mapped
// at the requested address, what's the offset of this object from _requested_static_archive_bottom?
- uintx buffer_to_offset(address p) const;
+ size_t buffer_to_offset(address p) const;
- // Same as buffer_to_offset, except that the address p points to either (a) an object
- // inside the output buffer, or (b), an object in the currently mapped static archive.
- uintx any_to_offset(address p) const;
+ // Same as buffer_to_offset, except that the address p points to one of the following:
+ // - an object in the ArchiveBuilder's buffer.
+ // - an object in the currently mapped AOT cache rw/ro regions.
+ // - an object that has been copied into the ArchiveBuilder's buffer.
+ size_t any_to_offset(address p) const;
// The reverse of buffer_to_offset()
- address offset_to_buffered_address(u4 offset) const;
+ address offset_to_buffered_address(size_t offset) const;
template
- u4 buffer_to_offset_u4(T p) const {
- uintx offset = buffer_to_offset((address)p);
- return to_offset_u4(offset);
- }
-
- template
- u4 any_to_offset_u4(T p) const {
- assert(p != nullptr, "must not be null");
- uintx offset = any_to_offset((address)p);
- return to_offset_u4(offset);
- }
-
- template
- u4 any_or_null_to_offset_u4(T p) const {
- if (p == nullptr) {
- return 0;
- } else {
- return any_to_offset_u4(p);
- }
- }
-
- template
- T offset_to_buffered(u4 offset) const {
+ T offset_to_buffered(size_t offset) const {
return (T)offset_to_buffered_address(offset);
}
@@ -438,8 +411,8 @@ public:
void make_training_data_shareable();
void relocate_to_requested();
void write_archive(FileMapInfo* mapinfo,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info);
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info);
void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
bool read_only, bool allow_exec);
diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp
index 842668509cf..ea9bde8eb8d 100644
--- a/src/hotspot/share/cds/archiveUtils.cpp
+++ b/src/hotspot/share/cds/archiveUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@@ -201,13 +202,13 @@ char* DumpRegion::expand_top_to(char* newtop) {
commit_to(newtop);
_top = newtop;
- if (_max_delta > 0) {
+ if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space(_base)) {
uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
- if (delta > _max_delta) {
+ if (delta > AOTCompressedPointers::MaxMetadataOffsetBytes) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
- aot_log_error(aot)("Out of memory in the CDS archive: Please reduce the number of shared classes.");
+ aot_log_error(aot)("Out of memory in the %s: Please reduce the number of shared classes.", CDSConfig::type_of_archive_being_written());
AOTMetaspace::unrecoverable_writing_error();
}
}
@@ -311,29 +312,15 @@ void DumpRegion::pack(DumpRegion* next) {
}
void WriteClosure::do_ptr(void** p) {
- // Write ptr into the archive; ptr can be:
- // (a) null -> written as 0
- // (b) a "buffered" address -> written as is
- // (c) a "source" address -> convert to "buffered" and write
- // The common case is (c). E.g., when writing the vmClasses into the archive.
- // We have (b) only when we don't have a corresponding source object. E.g.,
- // the archived c++ vtable entries.
address ptr = *(address*)p;
- if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
- ptr = ArchiveBuilder::current()->get_buffered_addr(ptr);
- }
- // null pointers do not need to be converted to offsets
- if (ptr != nullptr) {
- ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr);
- }
- _dump_region->append_intptr_t((intptr_t)ptr, false);
+ AOTCompressedPointers::narrowPtr narrowp = AOTCompressedPointers::encode(ptr);
+ _dump_region->append_intptr_t(checked_cast(narrowp), false);
}
void ReadClosure::do_ptr(void** p) {
assert(*p == nullptr, "initializing previous initialized pointer.");
- intptr_t obj = nextPtr();
- assert(obj >= 0, "sanity.");
- *p = (obj != 0) ? (void*)(_base_address + obj) : (void*)obj;
+ u4 narrowp = checked_cast(nextPtr());
+ *p = AOTCompressedPointers::decode(cast_from_u4(narrowp), _base_address);
}
void ReadClosure::do_u4(u4* p) {
diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp
index 79d894f0144..e5d1efa5eab 100644
--- a/src/hotspot/share/cds/archiveUtils.hpp
+++ b/src/hotspot/share/cds/archiveUtils.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -153,7 +153,6 @@ private:
char* _base;
char* _top;
char* _end;
- uintx _max_delta;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;
@@ -161,9 +160,9 @@ private:
void commit_to(char* newtop);
public:
- DumpRegion(const char* name, uintx max_delta = 0)
+ DumpRegion(const char* name)
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
- _max_delta(max_delta), _is_packed(false),
+ _is_packed(false),
_rs(nullptr), _vs(nullptr) {}
char* expand_top_to(char* newtop);
@@ -237,13 +236,13 @@ public:
class ReadClosure : public SerializeClosure {
private:
intptr_t** _ptr_array;
- intptr_t _base_address;
+ address _base_address;
inline intptr_t nextPtr() {
return *(*_ptr_array)++;
}
public:
- ReadClosure(intptr_t** ptr_array, intptr_t base_address) :
+ ReadClosure(intptr_t** ptr_array, address base_address) :
_ptr_array(ptr_array), _base_address(base_address) {}
void do_ptr(void** p);
@@ -260,7 +259,6 @@ class ArchiveUtils {
template static Array* archive_ptr_array(GrowableArray* tmp_array);
public:
- static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
static void log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) NOT_CDS_RETURN;
static bool has_aot_initialized_mirror(InstanceKlass* src_ik);
@@ -273,50 +271,6 @@ public:
static Array* archive_array(GrowableArray* tmp_array) {
return archive_ptr_array(tmp_array);
}
-
- // The following functions translate between a u4 offset and an address in the
- // the range of the mapped CDS archive (e.g., Metaspace::in_aot_cache()).
- // Since the first 16 bytes in this range are dummy data (see ArchiveBuilder::reserve_buffer()),
- // we know that offset 0 never represents a valid object. As a result, an offset of 0
- // is used to encode a nullptr.
- //
- // Use the "archived_address_or_null" variants if a nullptr may be encoded.
-
- // offset must represent an object of type T in the mapped shared space. Return
- // a direct pointer to this object.
- template T static offset_to_archived_address(u4 offset) {
- assert(offset != 0, "sanity");
- T p = (T)(SharedBaseAddress + offset);
- assert(Metaspace::in_aot_cache(p), "must be");
- return p;
- }
-
- template T static offset_to_archived_address_or_null(u4 offset) {
- if (offset == 0) {
- return nullptr;
- } else {
- return offset_to_archived_address(offset);
- }
- }
-
- // p must be an archived object. Get its offset from SharedBaseAddress
- template static u4 archived_address_to_offset(T p) {
- uintx pn = (uintx)p;
- uintx base = (uintx)SharedBaseAddress;
- assert(Metaspace::in_aot_cache(p), "must be");
- assert(pn > base, "sanity"); // No valid object is stored at 0 offset from SharedBaseAddress
- uintx offset = pn - base;
- assert(offset <= MAX_SHARED_DELTA, "range check");
- return static_cast(offset);
- }
-
- template static u4 archived_address_or_null_to_offset(T p) {
- if (p == nullptr) {
- return 0;
- } else {
- return archived_address_to_offset(p);
- }
- }
};
class HeapRootSegments {
diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp
index 5f6b568dd6e..f4ef3c66f7a 100644
--- a/src/hotspot/share/cds/cdsConfig.cpp
+++ b/src/hotspot/share/cds/cdsConfig.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -556,7 +556,9 @@ void CDSConfig::check_aotmode_record() {
// At VM exit, the module graph may be contaminated with program states.
// We will rebuild the module graph when dumping the CDS final image.
- disable_heap_dumping();
+ _is_using_optimized_module_handling = false;
+ _is_using_full_module_graph = false;
+ _is_dumping_full_module_graph = false;
}
void CDSConfig::check_aotmode_create() {
@@ -582,6 +584,7 @@ void CDSConfig::check_aotmode_create() {
substitute_aot_filename(FLAG_MEMBER_ENUM(AOTCache));
_is_dumping_final_static_archive = true;
+ _is_using_full_module_graph = false;
UseSharedSpaces = true;
RequireSharedSpaces = true;
@@ -954,7 +957,9 @@ bool CDSConfig::are_vm_options_incompatible_with_dumping_heap() {
}
bool CDSConfig::is_dumping_heap() {
- if (!(is_dumping_classic_static_archive() || is_dumping_final_static_archive())
+ // Note: when dumping preimage static archive, only a very limited set of oops
+ // are dumped.
+ if (!is_dumping_static_archive()
|| are_vm_options_incompatible_with_dumping_heap()
|| _disable_heap_dumping) {
return false;
@@ -966,6 +971,26 @@ bool CDSConfig::is_loading_heap() {
return HeapShared::is_archived_heap_in_use();
}
+bool CDSConfig::is_dumping_klass_subgraphs() {
+ if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
+ // KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
+ // has been superceded by AOT class linking. This feature is used only when
+ // AOT class linking is disabled.
+ //
+ // KlassSubGraphs are disabled in the preimage static archive, which contains a very
+ // limited set of oops.
+ return is_dumping_heap() && !is_dumping_aot_linked_classes();
+ } else {
+ return false;
+ }
+}
+
+bool CDSConfig::is_using_klass_subgraphs() {
+ return (is_loading_heap() &&
+ !CDSConfig::is_using_aot_linked_classes() &&
+ !CDSConfig::is_dumping_final_static_archive());
+}
+
bool CDSConfig::is_using_full_module_graph() {
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
return true;
diff --git a/src/hotspot/share/cds/cdsConfig.hpp b/src/hotspot/share/cds/cdsConfig.hpp
index 202904e8231..739dbb4937b 100644
--- a/src/hotspot/share/cds/cdsConfig.hpp
+++ b/src/hotspot/share/cds/cdsConfig.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,6 +188,9 @@ public:
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
+ static bool is_dumping_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
+ static bool is_using_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
+
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);
diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp
index f2862454286..dc5a777d7b1 100644
--- a/src/hotspot/share/cds/cppVtables.cpp
+++ b/src/hotspot/share/cds/cppVtables.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,14 @@
*
*/
+#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
+
+#ifndef PRODUCT
+
+// AOTGrowableArray has a vtable only when in non-product builds (due to
+// the virtual printing functions in AnyObj).
+
+using GrowableArray_ModuleEntry_ptr = AOTGrowableArray;
+
+#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
+ f(GrowableArray_ModuleEntry_ptr) \
+
+#endif
+
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
- f(CompileTrainingData)
+ f(CompileTrainingData) \
+ NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
class CppVtableInfo {
intptr_t _vtable_size;
@@ -86,7 +102,7 @@ public:
}
};
-static inline intptr_t* vtable_of(const Metadata* m) {
+static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}
@@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner::allocate_and_initialize(const char* name) {
template
void CppVtableCloner::initialize(const char* name, CppVtableInfo* info) {
+ ResourceMark rm;
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@@ -123,7 +140,7 @@ void CppVtableCloner::initialize(const char* name, CppVtableInfo* info) {
// We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
// safe to do memcpy.
- log_debug(aot, vtables)("Copying %3d vtable entries for %s", n, name);
+ log_debug(aot, vtables)("Copying %3d vtable entries for %s to " INTPTR_FORMAT, n, name, p2i(dstvtable));
memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
}
@@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
-intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
+intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
- switch (msotype) {
- case MetaspaceObj::SymbolType:
- case MetaspaceObj::TypeArrayU1Type:
- case MetaspaceObj::TypeArrayU2Type:
- case MetaspaceObj::TypeArrayU4Type:
- case MetaspaceObj::TypeArrayU8Type:
- case MetaspaceObj::TypeArrayOtherType:
- case MetaspaceObj::ConstMethodType:
- case MetaspaceObj::ConstantPoolCacheType:
- case MetaspaceObj::AnnotationsType:
- case MetaspaceObj::RecordComponentType:
- case MetaspaceObj::AdapterHandlerEntryType:
- case MetaspaceObj::AdapterFingerPrintType:
+ switch (type) {
+ case MetaspaceClosureType::SymbolType:
+ case MetaspaceClosureType::TypeArrayU1Type:
+ case MetaspaceClosureType::TypeArrayU2Type:
+ case MetaspaceClosureType::TypeArrayU4Type:
+ case MetaspaceClosureType::TypeArrayU8Type:
+ case MetaspaceClosureType::TypeArrayOtherType:
+ case MetaspaceClosureType::CArrayType:
+ case MetaspaceClosureType::ConstMethodType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
+ case MetaspaceClosureType::AnnotationsType:
+ case MetaspaceClosureType::ModuleEntryType:
+ case MetaspaceClosureType::PackageEntryType:
+ case MetaspaceClosureType::RecordComponentType:
+ case MetaspaceClosureType::AdapterHandlerEntryType:
+ case MetaspaceClosureType::AdapterFingerPrintType:
+ PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:
diff --git a/src/hotspot/share/cds/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp
index b40ca036023..9e28ba020ee 100644
--- a/src/hotspot/share/cds/cppVtables.hpp
+++ b/src/hotspot/share/cds/cppVtables.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
- static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
+ static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }
diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp
index 7d651320e6f..4553f0f6a01 100644
--- a/src/hotspot/share/cds/dumpAllocStats.hpp
+++ b/src/hotspot/share/cds/dumpAllocStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,32 +27,34 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
+#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
- // Here's poor man's enum inheritance
-#define SHAREDSPACE_OBJ_TYPES_DO(f) \
- METASPACE_OBJ_TYPES_DO(f) \
+#define DUMPED_OBJ_TYPES_DO(f) \
+ METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
- f(ModulesNatives) \
f(CppVTables) \
f(Other)
+#define DUMPED_TYPE_DECLARE(name) name ## Type,
+#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
- void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
- assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+ void record(MetaspaceClosureType type, int byte_size, bool read_only) {
+ int t = (int)type;
+ assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
- _counts[which][type] ++;
- _bytes [which][type] += byte_size;
- }
-
- void record_modules(int byte_size, bool read_only) {
- int which = (read_only) ? RO : RW;
- _bytes [which][ModulesNativesType] += byte_size;
+ _counts[which][t] ++;
+ _bytes [which][t] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index d39cf3775e4..cd6890555d3 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
#include "cds/aotClassLocation.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@@ -75,13 +76,13 @@ public:
return 0;
}
- u4 a_offset = ArchiveBuilder::current()->any_to_offset_u4(a_name);
- u4 b_offset = ArchiveBuilder::current()->any_to_offset_u4(b_name);
+ u4 a_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(a_name));
+ u4 b_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(b_name));
- if (a_offset < b_offset) {
+ if (a_narrowp < b_narrowp) {
return -1;
} else {
- assert(a_offset > b_offset, "must be");
+ assert(a_narrowp > b_narrowp, "must be");
return 1;
}
}
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 0eeb96bb269..91fbce701e5 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -298,11 +298,11 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
- st->print_cr("- cloned_vtables_offset: 0x%zx", _cloned_vtables_offset);
- st->print_cr("- early_serialized_data_offset: 0x%zx", _early_serialized_data_offset);
- st->print_cr("- serialized_data_offset: 0x%zx", _serialized_data_offset);
+ st->print_cr("- cloned_vtables: %u", cast_to_u4(_cloned_vtables));
+ st->print_cr("- early_serialized_data: %u", cast_to_u4(_early_serialized_data));
+ st->print_cr("- serialized_data: %u", cast_to_u4(_serialized_data));
st->print_cr("- jvm_ident: %s", _jvm_ident);
- st->print_cr("- class_location_config_offset: 0x%zx", _class_location_config_offset);
+ st->print_cr("- class_location_config: %d", cast_to_u4(_class_location_config));
st->print_cr("- verify_local: %d", _verify_local);
st->print_cr("- verify_remote: %d", _verify_remote);
st->print_cr("- has_platform_or_app_classes: %d", _has_platform_or_app_classes);
@@ -717,8 +717,8 @@ bool FileMapInfo::init_from_file(int fd) {
}
void FileMapInfo::seek_to_position(size_t pos) {
- if (os::lseek(_fd, (long)pos, SEEK_SET) < 0) {
- aot_log_error(aot)("Unable to seek to position %zu", pos);
+ if (os::lseek(_fd, (jlong)pos, SEEK_SET) < 0) {
+ aot_log_error(aot)("Unable to seek to position %zu (errno=%d: %s)", pos, errno, os::strerror(errno));
AOTMetaspace::unrecoverable_loading_error();
}
}
@@ -974,8 +974,8 @@ size_t FileMapInfo::remove_bitmap_zeros(CHeapBitMap* map) {
char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap,
CHeapBitMap* ro_ptrmap,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info,
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info,
size_t &size_in_bytes) {
size_t removed_rw_leading_zeros = remove_bitmap_zeros(rw_ptrmap);
size_t removed_ro_leading_zeros = remove_bitmap_zeros(ro_ptrmap);
@@ -1035,7 +1035,7 @@ char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap,
}
#if INCLUDE_CDS_JAVA_HEAP
-size_t FileMapInfo::write_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) {
+size_t FileMapInfo::write_mapped_heap_region(AOTMappedHeapInfo* heap_info) {
char* buffer_start = heap_info->buffer_start();
size_t buffer_size = heap_info->buffer_byte_size();
write_region(AOTMetaspace::hp, buffer_start, buffer_size, false, false);
@@ -1043,7 +1043,7 @@ size_t FileMapInfo::write_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) {
return buffer_size;
}
-size_t FileMapInfo::write_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) {
+size_t FileMapInfo::write_streamed_heap_region(AOTStreamedHeapInfo* heap_info) {
char* buffer_start = heap_info->buffer_start();
size_t buffer_size = heap_info->buffer_byte_size();
write_region(AOTMetaspace::hp, buffer_start, buffer_size, true, false);
@@ -1325,9 +1325,7 @@ char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
aot_log_error(aot)("region %d CRC error", region_index);
- if (!os::unmap_memory(mapped_base, r->used_aligned())) {
- fatal("os::unmap_memory of region %d failed", region_index);
- }
+ os::unmap_memory(mapped_base, r->used_aligned());
return nullptr;
}
@@ -1654,9 +1652,7 @@ void FileMapInfo::unmap_region(int i) {
// is released. Zero it so that we don't accidentally read its content.
aot_log_info(aot)("Region #%d (%s) is in a reserved space, it will be freed when the space is released", i, shared_region_name[i]);
} else {
- if (!os::unmap_memory(mapped_base, size)) {
- fatal("os::unmap_memory failed");
- }
+ os::unmap_memory(mapped_base, size);
}
}
r->set_mapped_base(nullptr);
@@ -1767,10 +1763,6 @@ void FileMapInfo::print(outputStream* st) const {
}
}
-void FileMapHeader::set_as_offset(char* p, size_t *offset) {
- *offset = ArchiveBuilder::current()->any_to_offset((address)p);
-}
-
int FileMapHeader::compute_crc() {
char* start = (char*)this;
// start computing from the field after _header_size to end of base archive name.
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index fbd3c8e1681..56b88df378a 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,10 @@
#ifndef SHARE_CDS_FILEMAP_HPP
#define SHARE_CDS_FILEMAP_HPP
+#include "cds/aotCompressedPointers.hpp"
+#include "cds/aotMappedHeap.hpp"
#include "cds/aotMetaspace.hpp"
+#include "cds/aotStreamedHeap.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/heapShared.hpp"
#include "include/cds.h"
@@ -102,7 +105,7 @@ public:
class FileMapHeader: private CDSFileMapHeaderBase {
friend class CDSConstants;
friend class VMStructs;
-
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
// The following fields record the states of the VM during dump time.
// They are compared with the runtime states to see if the archive
@@ -120,16 +123,16 @@ private:
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
int _narrow_klass_pointer_bits; // save number of bits in narrowKlass
int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects
- size_t _cloned_vtables_offset; // The address of the first cloned vtable
- size_t _early_serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
- size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
+ narrowPtr _cloned_vtables; // The address of the first cloned vtable
+ narrowPtr _early_serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
+ narrowPtr _serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
char _jvm_ident[JVM_IDENT_MAX]; // identifier string of the jvm that created this dump
- size_t _class_location_config_offset;
+ narrowPtr _class_location_config;
bool _verify_local; // BytecodeVerificationLocal setting
bool _verify_remote; // BytecodeVerificationRemote setting
@@ -144,8 +147,8 @@ private:
size_t _rw_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the rw region
size_t _ro_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the ro region
- ArchiveMappedHeapHeader _mapped_heap_header;
- ArchiveStreamedHeapHeader _streamed_heap_header;
+ AOTMappedHeapHeader _mapped_heap_header;
+ AOTStreamedHeapHeader _streamed_heap_header;
// The following are parameters that affect MethodData layout.
u1 _compiler_type;
@@ -158,12 +161,8 @@ private:
bool _type_profile_casts;
int _spec_trap_limit_extra_entries;
- template T from_mapped_offset(size_t offset) const {
- return (T)(mapped_base_address() + offset);
- }
- void set_as_offset(char* p, size_t *offset);
- template void set_as_offset(T p, size_t *offset) {
- set_as_offset((char*)p, offset);
+ template T decode(narrowPtr narrowp) const {
+ return AOTCompressedPointers::decode_not_null(narrowp, reinterpret_cast(mapped_base_address()));
}
public:
@@ -191,9 +190,9 @@ public:
bool compact_headers() const { return _compact_headers; }
uintx max_heap_size() const { return _max_heap_size; }
CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; }
- char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); }
- char* early_serialized_data() const { return from_mapped_offset(_early_serialized_data_offset); }
- char* serialized_data() const { return from_mapped_offset(_serialized_data_offset); }
+ char* cloned_vtables() const { return decode(_cloned_vtables); }
+ char* early_serialized_data() const { return decode(_early_serialized_data); }
+ char* serialized_data() const { return decode(_serialized_data); }
bool object_streaming_mode() const { return _object_streaming_mode; }
const char* jvm_ident() const { return _jvm_ident; }
char* requested_base_address() const { return _requested_base_address; }
@@ -209,16 +208,16 @@ public:
size_t ro_ptrmap_start_pos() const { return _ro_ptrmap_start_pos; }
// Heap archiving
- const ArchiveMappedHeapHeader* mapped_heap() const { return &_mapped_heap_header; }
- const ArchiveStreamedHeapHeader* streamed_heap() const { return &_streamed_heap_header; }
+ const AOTMappedHeapHeader* mapped_heap() const { return &_mapped_heap_header; }
+ const AOTStreamedHeapHeader* streamed_heap() const { return &_streamed_heap_header; }
- void set_streamed_heap_header(ArchiveStreamedHeapHeader header) { _streamed_heap_header = header; }
- void set_mapped_heap_header(ArchiveMappedHeapHeader header) { _mapped_heap_header = header; }
+ void set_streamed_heap_header(AOTStreamedHeapHeader header) { _streamed_heap_header = header; }
+ void set_mapped_heap_header(AOTMappedHeapHeader header) { _mapped_heap_header = header; }
void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
- void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
- void set_early_serialized_data(char* p) { set_as_offset(p, &_early_serialized_data_offset); }
- void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
+ void set_cloned_vtables(char* p) { _cloned_vtables = AOTCompressedPointers::encode_not_null(p); }
+ void set_early_serialized_data(char* p) { _early_serialized_data = AOTCompressedPointers::encode_not_null(p); }
+ void set_serialized_data(char* p) { _serialized_data = AOTCompressedPointers::encode_not_null(p); }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_rw_ptrmap_start_pos(size_t n) { _rw_ptrmap_start_pos = n; }
void set_ro_ptrmap_start_pos(size_t n) { _ro_ptrmap_start_pos = n; }
@@ -226,11 +225,11 @@ public:
void copy_base_archive_name(const char* name);
void set_class_location_config(AOTClassLocationConfig* table) {
- set_as_offset(table, &_class_location_config_offset);
+ _class_location_config = AOTCompressedPointers::encode_not_null(table);
}
AOTClassLocationConfig* class_location_config() {
- return from_mapped_offset(_class_location_config_offset);
+ return decode(_class_location_config);
}
void set_requested_base(char* b) {
@@ -309,8 +308,8 @@ public:
uintx max_heap_size() const { return header()->max_heap_size(); }
size_t core_region_alignment() const { return header()->core_region_alignment(); }
- const ArchiveMappedHeapHeader* mapped_heap() const { return header()->mapped_heap(); }
- const ArchiveStreamedHeapHeader* streamed_heap() const { return header()->streamed_heap(); }
+ const AOTMappedHeapHeader* mapped_heap() const { return header()->mapped_heap(); }
+ const AOTStreamedHeapHeader* streamed_heap() const { return header()->streamed_heap(); }
bool object_streaming_mode() const { return header()->object_streaming_mode(); }
CompressedOops::Mode narrow_oop_mode() const { return header()->narrow_oop_mode(); }
@@ -372,11 +371,11 @@ public:
size_t remove_bitmap_zeros(CHeapBitMap* map);
char* write_bitmap_region(CHeapBitMap* rw_ptrmap,
CHeapBitMap* ro_ptrmap,
- ArchiveMappedHeapInfo* mapped_heap_info,
- ArchiveStreamedHeapInfo* streamed_heap_info,
+ AOTMappedHeapInfo* mapped_heap_info,
+ AOTStreamedHeapInfo* streamed_heap_info,
size_t &size_in_bytes);
- size_t write_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
- size_t write_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
+ size_t write_mapped_heap_region(AOTMappedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
+ size_t write_streamed_heap_region(AOTStreamedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
void write_bytes(const void* buffer, size_t count);
void write_bytes_aligned(const void* buffer, size_t count);
size_t read_bytes(void* buffer, size_t count);
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index fdc335f3799..0c0f70eac0a 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
@@ -95,55 +96,6 @@ struct ArchivableStaticFieldInfo {
}
};
-// Anything that goes in the header must be thoroughly purged from uninitialized memory
-// as it will be written to disk. Therefore, the constructors memset the memory to 0.
-// This is not the prettiest thing, but we need to know every byte is initialized,
-// including potential padding between fields.
-
-ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
- size_t oopmap_start_pos,
- HeapRootSegments root_segments) {
- memset((char*)this, 0, sizeof(*this));
- _ptrmap_start_pos = ptrmap_start_pos;
- _oopmap_start_pos = oopmap_start_pos;
- _root_segments = root_segments;
-}
-
-ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
- memset((char*)this, 0, sizeof(*this));
-}
-
-ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
- return ArchiveMappedHeapHeader{_ptrmap_start_pos,
- _oopmap_start_pos,
- _root_segments};
-}
-
-ArchiveStreamedHeapHeader::ArchiveStreamedHeapHeader(size_t forwarding_offset,
- size_t roots_offset,
- size_t num_roots,
- size_t root_highest_object_index_table_offset,
- size_t num_archived_objects) {
- memset((char*)this, 0, sizeof(*this));
- _forwarding_offset = forwarding_offset;
- _roots_offset = roots_offset;
- _num_roots = num_roots;
- _root_highest_object_index_table_offset = root_highest_object_index_table_offset;
- _num_archived_objects = num_archived_objects;
-}
-
-ArchiveStreamedHeapHeader::ArchiveStreamedHeapHeader() {
- memset((char*)this, 0, sizeof(*this));
-}
-
-ArchiveStreamedHeapHeader ArchiveStreamedHeapInfo::create_header() {
- return ArchiveStreamedHeapHeader{_forwarding_offset,
- _roots_offset,
- _num_roots,
- _root_highest_object_index_table_offset,
- _num_archived_objects};
-}
-
HeapArchiveMode HeapShared::_heap_load_mode = HeapArchiveMode::_uninitialized;
HeapArchiveMode HeapShared::_heap_write_mode = HeapArchiveMode::_uninitialized;
@@ -210,7 +162,7 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
@@ -295,6 +247,28 @@ void HeapShared::reset_archived_object_states(TRAPS) {
reset_states(boot_loader(), CHECK);
}
+void HeapShared::ensure_determinism(TRAPS) {
+ TempNewSymbol class_name = SymbolTable::new_symbol("jdk/internal/util/WeakReferenceKey");
+ TempNewSymbol method_name = SymbolTable::new_symbol("ensureDeterministicAOTCache");
+
+ Klass* weak_ref_key_class = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
+ precond(weak_ref_key_class != nullptr);
+
+ log_debug(aot)("Calling WeakReferenceKey::ensureDeterministicAOTCache(Object.class)");
+ JavaValue result(T_BOOLEAN);
+ JavaCalls::call_static(&result,
+ weak_ref_key_class,
+ method_name,
+ vmSymbols::void_boolean_signature(),
+ CHECK);
+ assert(result.get_jboolean() == false, "sanity");
+}
+
+void HeapShared::prepare_for_archiving(TRAPS) {
+ reset_archived_object_states(CHECK);
+ ensure_determinism(CHECK);
+}
+
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
bool HeapShared::is_archived_heap_in_use() {
@@ -413,6 +387,8 @@ void HeapShared::materialize_thread_object() {
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
+ bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
+ assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
@@ -453,7 +429,6 @@ int HeapShared::append_root(oop obj) {
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
- assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
oop result;
@@ -598,8 +573,7 @@ public:
void set_oop(MetaspaceObj* ptr, oop o) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle handle(Universe::vm_global(), o);
- bool is_new = put(ptr, handle);
- assert(is_new, "cannot set twice");
+ put_when_absent(ptr, handle);
}
void remove_oop(MetaspaceObj* ptr) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
@@ -612,6 +586,11 @@ public:
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
+ if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
+ // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference.
+ // Ignore it, as this class will be excluded from the AOT config.
+ return;
+ }
if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
_scratch_objects_table->set_oop(src, dest);
}
@@ -831,14 +810,6 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
return nullptr;
}
-void HeapShared::archive_strings() {
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
- oop shared_strings_array = StringTable::init_shared_strings_array();
- bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
- assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
- StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
-}
-
int HeapShared::archive_exception_instance(oop exception) {
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
assert(success, "sanity");
@@ -890,12 +861,12 @@ void HeapShared::start_scanning_for_oops() {
void HeapShared::end_scanning_for_oops() {
if (is_writing_mapping_mode()) {
- archive_strings();
+ StringTable::init_shared_table();
}
delete_seen_objects_table();
}
-void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
+void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
{
NoSafepointVerifier nsv;
CDSHeapVerifier::verify();
@@ -940,7 +911,7 @@ void HeapShared::scan_java_class(Klass* orig_k) {
void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
@@ -948,10 +919,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
-
- if (CDSConfig::is_dumping_full_module_graph()) {
- Modules::verify_archived_modules();
- }
}
//
@@ -1204,8 +1171,7 @@ public:
ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
- u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(record));
}
return true; // keep on iterating
}
@@ -1302,10 +1268,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
- if (!is_archived_heap_in_use()) {
- return; // nothing to do
- }
- if (!CDSConfig::is_using_aot_linked_classes()) {
+ if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
@@ -1395,7 +1358,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
- if (!is_archived_heap_in_use()) {
+ if (!CDSConfig::is_using_klass_subgraphs()) {
return; // nothing to do
}
@@ -1871,7 +1834,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name) {
- assert(CDSConfig::is_dumping_heap(), "dump time only");
+ precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@@ -1922,7 +1885,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
};
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
- assert(CDSConfig::is_dumping_heap(), "dump time only");
+ precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@@ -2148,7 +2111,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(CDSConfig::is_dumping_heap(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 118c60faa60..2cb330160e4 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -142,129 +142,6 @@ enum class HeapArchiveMode {
_streaming
};
-class ArchiveMappedHeapHeader {
- size_t _ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
- size_t _oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
- HeapRootSegments _root_segments; // Heap root segments info
-
-public:
- ArchiveMappedHeapHeader();
- ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
- size_t oopmap_start_pos,
- HeapRootSegments root_segments);
-
- size_t ptrmap_start_pos() const { return _ptrmap_start_pos; }
- size_t oopmap_start_pos() const { return _oopmap_start_pos; }
- HeapRootSegments root_segments() const { return _root_segments; }
-
- // This class is trivially copyable and assignable.
- ArchiveMappedHeapHeader(const ArchiveMappedHeapHeader&) = default;
- ArchiveMappedHeapHeader& operator=(const ArchiveMappedHeapHeader&) = default;
-};
-
-
-class ArchiveStreamedHeapHeader {
- size_t _forwarding_offset; // Offset of forwarding information in the heap region.
- size_t _roots_offset; // Start position for the roots
- size_t _root_highest_object_index_table_offset; // Offset of root dfs depth information
- size_t _num_roots; // Number of embedded roots
- size_t _num_archived_objects; // The number of archived heap objects
-
-public:
- ArchiveStreamedHeapHeader();
- ArchiveStreamedHeapHeader(size_t forwarding_offset,
- size_t roots_offset,
- size_t num_roots,
- size_t root_highest_object_index_table_offset,
- size_t num_archived_objects);
-
- size_t forwarding_offset() const { return _forwarding_offset; }
- size_t roots_offset() const { return _roots_offset; }
- size_t num_roots() const { return _num_roots; }
- size_t root_highest_object_index_table_offset() const { return _root_highest_object_index_table_offset; }
- size_t num_archived_objects() const { return _num_archived_objects; }
-
- // This class is trivially copyable and assignable.
- ArchiveStreamedHeapHeader(const ArchiveStreamedHeapHeader&) = default;
- ArchiveStreamedHeapHeader& operator=(const ArchiveStreamedHeapHeader&) = default;
-};
-
-class ArchiveMappedHeapInfo {
- MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
- CHeapBitMap _oopmap;
- CHeapBitMap _ptrmap;
- HeapRootSegments _root_segments;
- size_t _oopmap_start_pos; // How many zeros were removed from the beginning of the bit map?
- size_t _ptrmap_start_pos; // How many zeros were removed from the beginning of the bit map?
-
-public:
- ArchiveMappedHeapInfo() :
- _buffer_region(),
- _oopmap(128, mtClassShared),
- _ptrmap(128, mtClassShared),
- _root_segments(),
- _oopmap_start_pos(),
- _ptrmap_start_pos() {}
- bool is_used() { return !_buffer_region.is_empty(); }
-
- MemRegion buffer_region() { return _buffer_region; }
- void set_buffer_region(MemRegion r) { _buffer_region = r; }
-
- char* buffer_start() { return (char*)_buffer_region.start(); }
- size_t buffer_byte_size() { return _buffer_region.byte_size(); }
-
- CHeapBitMap* oopmap() { return &_oopmap; }
- CHeapBitMap* ptrmap() { return &_ptrmap; }
-
- void set_oopmap_start_pos(size_t start_pos) { _oopmap_start_pos = start_pos; }
- void set_ptrmap_start_pos(size_t start_pos) { _ptrmap_start_pos = start_pos; }
-
- void set_root_segments(HeapRootSegments segments) { _root_segments = segments; };
- HeapRootSegments root_segments() { return _root_segments; }
-
- ArchiveMappedHeapHeader create_header();
-};
-
-class ArchiveStreamedHeapInfo {
- MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
- CHeapBitMap _oopmap;
- size_t _roots_offset; // Offset of the HeapShared::roots() object, from the bottom
- // of the archived heap objects, in bytes.
- size_t _num_roots;
-
- size_t _forwarding_offset; // Offset of forwarding information from the bottom
- size_t _root_highest_object_index_table_offset; // Offset to root dfs depth information
- size_t _num_archived_objects; // The number of archived objects written into the CDS archive.
-
-public:
- ArchiveStreamedHeapInfo()
- : _buffer_region(),
- _oopmap(128, mtClassShared),
- _roots_offset(),
- _forwarding_offset(),
- _root_highest_object_index_table_offset(),
- _num_archived_objects() {}
-
- bool is_used() { return !_buffer_region.is_empty(); }
-
- void set_buffer_region(MemRegion r) { _buffer_region = r; }
- MemRegion buffer_region() { return _buffer_region; }
- char* buffer_start() { return (char*)_buffer_region.start(); }
- size_t buffer_byte_size() { return _buffer_region.byte_size(); }
-
- CHeapBitMap* oopmap() { return &_oopmap; }
- void set_roots_offset(size_t n) { _roots_offset = n; }
- size_t roots_offset() { return _roots_offset; }
- void set_num_roots(size_t n) { _num_roots = n; }
- size_t num_roots() { return _num_roots; }
- void set_forwarding_offset(size_t n) { _forwarding_offset = n; }
- void set_root_highest_object_index_table_offset(size_t n) { _root_highest_object_index_table_offset = n; }
- void set_num_archived_objects(size_t n) { _num_archived_objects = n; }
- size_t num_archived_objects() { return _num_archived_objects; }
-
- ArchiveStreamedHeapHeader create_header();
-};
-
class HeapShared: AllStatic {
friend class VerifySharedOopClosure;
@@ -478,7 +355,6 @@ private:
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
- static void archive_strings();
static void archive_subgraphs();
static void copy_java_mirror(oop orig_mirror, oop scratch_m);
@@ -506,8 +382,10 @@ private:
static bool walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
oop orig_obj, oop referrer);
- public:
static void reset_archived_object_states(TRAPS);
+ static void ensure_determinism(TRAPS);
+ public:
+ static void prepare_for_archiving(TRAPS);
static void create_archived_object_cache() {
_archived_object_cache =
new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
@@ -576,7 +454,7 @@ private:
public:
static void finish_materialize_objects() NOT_CDS_JAVA_HEAP_RETURN;
- static void write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) NOT_CDS_JAVA_HEAP_RETURN;
+ static void write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) NOT_CDS_JAVA_HEAP_RETURN;
static objArrayOop scratch_resolved_references(ConstantPool* src);
static void add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) NOT_CDS_JAVA_HEAP_RETURN;
static void init_dumping() NOT_CDS_JAVA_HEAP_RETURN;
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.cpp b/src/hotspot/share/cds/lambdaFormInvokers.cpp
index 19dae28c5b5..3ff5705b79d 100644
--- a/src/hotspot/share/cds/lambdaFormInvokers.cpp
+++ b/src/hotspot/share/cds/lambdaFormInvokers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
@@ -52,7 +53,7 @@
#include "runtime/mutexLocker.hpp"
GrowableArrayCHeap* LambdaFormInvokers::_lambdaform_lines = nullptr;
-Array* LambdaFormInvokers::_static_archive_invokers = nullptr;
+Array* LambdaFormInvokers::_static_archive_invokers = nullptr;
static bool _stop_appending = false;
#define NUM_FILTER 4
@@ -252,7 +253,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
}
}
if (count > 0) {
- _static_archive_invokers = ArchiveBuilder::new_ro_array(count);
+ _static_archive_invokers = ArchiveBuilder::new_ro_array(count);
int index = 0;
for (int i = 0; i < len; i++) {
char* str = _lambdaform_lines->at(i);
@@ -261,7 +262,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
Array* line = ArchiveBuilder::new_ro_array((int)str_len);
strncpy(line->adr_at(0), str, str_len);
- _static_archive_invokers->at_put(index, ArchiveBuilder::current()->any_to_offset_u4(line));
+ _static_archive_invokers->at_put(index, AOTCompressedPointers::encode_not_null(line));
index++;
}
}
@@ -274,8 +275,8 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
void LambdaFormInvokers::read_static_archive_invokers() {
if (_static_archive_invokers != nullptr) {
for (int i = 0; i < _static_archive_invokers->length(); i++) {
- u4 offset = _static_archive_invokers->at(i);
- Array* line = ArchiveUtils::offset_to_archived_address*>(offset);
+ narrowPtr encoded = _static_archive_invokers->at(i);
+ Array* line = AOTCompressedPointers::decode_not_null*>(encoded);
char* str = line->adr_at(0);
append(str);
}
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.hpp b/src/hotspot/share/cds/lambdaFormInvokers.hpp
index 583a863a1c2..9b91850f5b1 100644
--- a/src/hotspot/share/cds/lambdaFormInvokers.hpp
+++ b/src/hotspot/share/cds/lambdaFormInvokers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@
#ifndef SHARE_CDS_LAMBDAFORMINVOKERS_HPP
#define SHARE_CDS_LAMBDAFORMINVOKERS_HPP
+
+#include "cds/aotCompressedPointers.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/handles.hpp"
@@ -35,10 +37,11 @@ class Array;
class SerializeClosure;
class LambdaFormInvokers : public AllStatic {
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
static GrowableArrayCHeap* _lambdaform_lines;
// For storing LF form lines (LF_RESOLVE only) in read only table.
- static Array* _static_archive_invokers;
+ static Array* _static_archive_invokers;
static void regenerate_class(char* name, ClassFileStream& st, TRAPS);
public:
static void append(char* line);
diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
index d091067c116..4d212dbf2c2 100644
--- a/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
+++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsProtectionDomain.hpp"
@@ -49,11 +50,11 @@ unsigned int LambdaProxyClassKey::hash() const {
}
unsigned int RunTimeLambdaProxyClassKey::hash() const {
- return primitive_hash(_caller_ik) +
- primitive_hash(_invoked_name) +
- primitive_hash(_invoked_type) +
- primitive_hash(_method_type) +
- primitive_hash(_instantiated_method_type);
+ return primitive_hash(cast_to_u4(_caller_ik)) +
+ primitive_hash(cast_to_u4(_invoked_name)) +
+ primitive_hash(cast_to_u4(_invoked_type)) +
+ primitive_hash(cast_to_u4(_method_type)) +
+ primitive_hash(cast_to_u4(_instantiated_method_type));
}
#ifndef PRODUCT
@@ -71,12 +72,12 @@ void LambdaProxyClassKey::print_on(outputStream* st) const {
void RunTimeLambdaProxyClassKey::print_on(outputStream* st) const {
ResourceMark rm;
st->print_cr("LambdaProxyClassKey : " INTPTR_FORMAT " hash: %0x08x", p2i(this), hash());
- st->print_cr("_caller_ik : %d", _caller_ik);
- st->print_cr("_instantiated_method_type : %d", _instantiated_method_type);
- st->print_cr("_invoked_name : %d", _invoked_name);
- st->print_cr("_invoked_type : %d", _invoked_type);
- st->print_cr("_member_method : %d", _member_method);
- st->print_cr("_method_type : %d", _method_type);
+ st->print_cr("_caller_ik : %d", cast_to_u4(_caller_ik));
+ st->print_cr("_instantiated_method_type : %d", cast_to_u4(_instantiated_method_type));
+ st->print_cr("_invoked_name : %d", cast_to_u4(_invoked_name));
+ st->print_cr("_invoked_type : %d", cast_to_u4(_invoked_type));
+ st->print_cr("_member_method : %d", cast_to_u4(_member_method));
+ st->print_cr("_method_type : %d", cast_to_u4(_method_type));
}
void RunTimeLambdaProxyClassInfo::print_on(outputStream* st) const {
@@ -418,8 +419,7 @@ public:
(RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
runtime_info->init(key, info);
unsigned int hash = runtime_info->hash();
- u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(runtime_info));
return true;
}
};
diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
index 91e508bfdc5..dfb75532917 100644
--- a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
+++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,9 @@
#ifndef SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#define SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
-#include "cds/archiveBuilder.hpp"
+#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/metaspaceClosure.hpp"
#include "utilities/growableArray.hpp"
@@ -132,19 +133,20 @@ public:
};
class RunTimeLambdaProxyClassKey {
- u4 _caller_ik;
- u4 _invoked_name;
- u4 _invoked_type;
- u4 _method_type;
- u4 _member_method;
- u4 _instantiated_method_type;
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
+ narrowPtr _caller_ik;
+ narrowPtr _invoked_name;
+ narrowPtr _invoked_type;
+ narrowPtr _method_type;
+ narrowPtr _member_method;
+ narrowPtr _instantiated_method_type;
- RunTimeLambdaProxyClassKey(u4 caller_ik,
- u4 invoked_name,
- u4 invoked_type,
- u4 method_type,
- u4 member_method,
- u4 instantiated_method_type) :
+ RunTimeLambdaProxyClassKey(narrowPtr caller_ik,
+ narrowPtr invoked_name,
+ narrowPtr invoked_type,
+ narrowPtr method_type,
+ narrowPtr member_method,
+ narrowPtr instantiated_method_type) :
_caller_ik(caller_ik),
_invoked_name(invoked_name),
_invoked_type(invoked_type),
@@ -154,15 +156,12 @@ class RunTimeLambdaProxyClassKey {
public:
static RunTimeLambdaProxyClassKey init_for_dumptime(LambdaProxyClassKey& key) {
- assert(ArchiveBuilder::is_active(), "sanity");
- ArchiveBuilder* b = ArchiveBuilder::current();
-
- u4 caller_ik = b->any_to_offset_u4(key.caller_ik());
- u4 invoked_name = b->any_to_offset_u4(key.invoked_name());
- u4 invoked_type = b->any_to_offset_u4(key.invoked_type());
- u4 method_type = b->any_to_offset_u4(key.method_type());
- u4 member_method = b->any_or_null_to_offset_u4(key.member_method()); // could be null
- u4 instantiated_method_type = b->any_to_offset_u4(key.instantiated_method_type());
+ narrowPtr caller_ik = AOTCompressedPointers::encode_not_null(key.caller_ik());
+ narrowPtr invoked_name = AOTCompressedPointers::encode_not_null(key.invoked_name());
+ narrowPtr invoked_type = AOTCompressedPointers::encode_not_null(key.invoked_type());
+ narrowPtr method_type = AOTCompressedPointers::encode_not_null(key.method_type());
+ narrowPtr member_method = AOTCompressedPointers::encode(key.member_method()); // could be null
+ narrowPtr instantiated_method_type = AOTCompressedPointers::encode_not_null(key.instantiated_method_type());
return RunTimeLambdaProxyClassKey(caller_ik, invoked_name, invoked_type, method_type,
member_method, instantiated_method_type);
@@ -176,12 +175,12 @@ public:
Symbol* instantiated_method_type) {
// All parameters must be in shared space, or else you'd get an assert in
// ArchiveUtils::to_offset().
- return RunTimeLambdaProxyClassKey(ArchiveUtils::archived_address_to_offset(caller_ik),
- ArchiveUtils::archived_address_to_offset(invoked_name),
- ArchiveUtils::archived_address_to_offset(invoked_type),
- ArchiveUtils::archived_address_to_offset(method_type),
- ArchiveUtils::archived_address_or_null_to_offset(member_method), // could be null
- ArchiveUtils::archived_address_to_offset(instantiated_method_type));
+ return RunTimeLambdaProxyClassKey(AOTCompressedPointers::encode_address_in_cache(caller_ik),
+ AOTCompressedPointers::encode_address_in_cache(invoked_name),
+ AOTCompressedPointers::encode_address_in_cache(invoked_type),
+ AOTCompressedPointers::encode_address_in_cache(method_type),
+ AOTCompressedPointers::encode_address_in_cache_or_null(member_method), // could be null
+ AOTCompressedPointers::encode_address_in_cache(instantiated_method_type));
}
unsigned int hash() const;
diff --git a/src/hotspot/share/cds/runTimeClassInfo.cpp b/src/hotspot/share/cds/runTimeClassInfo.cpp
index fe940ca6c18..a1f50ab4ffa 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.cpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,15 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/dumpTimeClassInfo.hpp"
#include "cds/runTimeClassInfo.hpp"
#include "classfile/systemDictionaryShared.hpp"
void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
- ArchiveBuilder* builder = ArchiveBuilder::current();
InstanceKlass* k = info._klass;
- _klass_offset = builder->any_to_offset_u4(k);
+ _klass = AOTCompressedPointers::encode_not_null(k);
if (!SystemDictionaryShared::is_builtin(k)) {
CrcInfo* c = crc();
@@ -50,8 +50,8 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
RTVerifierConstraint* vf_constraints = verifier_constraints();
char* flags = verifier_constraint_flags();
for (i = 0; i < _num_verifier_constraints; i++) {
- vf_constraints[i]._name = builder->any_to_offset_u4(info._verifier_constraints->at(i).name());
- vf_constraints[i]._from_name = builder->any_or_null_to_offset_u4(info._verifier_constraints->at(i).from_name());
+ vf_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._verifier_constraints->at(i).name());
+ vf_constraints[i]._from_name = AOTCompressedPointers::encode(info._verifier_constraints->at(i).from_name());
}
for (i = 0; i < _num_verifier_constraints; i++) {
flags[i] = info._verifier_constraint_flags->at(i);
@@ -61,14 +61,14 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
if (_num_loader_constraints > 0) {
RTLoaderConstraint* ld_constraints = loader_constraints();
for (i = 0; i < _num_loader_constraints; i++) {
- ld_constraints[i]._name = builder->any_to_offset_u4(info._loader_constraints->at(i).name());
+ ld_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._loader_constraints->at(i).name());
ld_constraints[i]._loader_type1 = info._loader_constraints->at(i).loader_type1();
ld_constraints[i]._loader_type2 = info._loader_constraints->at(i).loader_type2();
}
}
if (k->is_hidden() && info.nest_host() != nullptr) {
- _nest_host_offset = builder->any_to_offset_u4(info.nest_host());
+ _nest_host = AOTCompressedPointers::encode_not_null(info.nest_host());
}
if (k->has_archived_enum_objs()) {
int num = info.num_enum_klass_static_fields();
@@ -83,11 +83,12 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
InstanceKlass* RunTimeClassInfo::klass() const {
if (AOTMetaspace::in_aot_cache(this)) {
// is inside a mmaped CDS archive.
- return ArchiveUtils::offset_to_archived_address(_klass_offset);
+ return AOTCompressedPointers::decode_not_null(_klass);
} else {
// is a temporary copy of a RunTimeClassInfo that's being initialized
// by the ArchiveBuilder.
- return ArchiveBuilder::current()->offset_to_buffered(_klass_offset);
+ size_t byte_offset = AOTCompressedPointers::get_byte_offset(_klass);
+ return ArchiveBuilder::current()->offset_to_buffered(byte_offset);
}
}
diff --git a/src/hotspot/share/cds/runTimeClassInfo.hpp b/src/hotspot/share/cds/runTimeClassInfo.hpp
index 371924f9065..d63a04698bb 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.hpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_CDS_RUNTIMECLASSINFO_HPP
#define SHARE_CDS_RUNTIMECLASSINFO_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
@@ -41,8 +42,10 @@ class Method;
class Symbol;
class RunTimeClassInfo {
- public:
- enum : char {
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
+
+public:
+ enum : char {
FROM_FIELD_IS_PROTECTED = 1 << 0,
FROM_IS_ARRAY = 1 << 1,
FROM_IS_OBJECT = 1 << 2
@@ -56,19 +59,19 @@ class RunTimeClassInfo {
// This is different than DumpTimeClassInfo::DTVerifierConstraint. We use
// u4 instead of Symbol* to save space on 64-bit CPU.
struct RTVerifierConstraint {
- u4 _name;
- u4 _from_name;
- Symbol* name() { return ArchiveUtils::offset_to_archived_address(_name); }
+ narrowPtr _name;
+ narrowPtr _from_name;
+ Symbol* name() { return AOTCompressedPointers::decode_not_null(_name); }
Symbol* from_name() {
- return (_from_name == 0) ? nullptr : ArchiveUtils::offset_to_archived_address(_from_name);
+ return AOTCompressedPointers::decode(_from_name);
}
};
struct RTLoaderConstraint {
- u4 _name;
+ narrowPtr _name;
char _loader_type1;
char _loader_type2;
- Symbol* constraint_name() { return ArchiveUtils::offset_to_archived_address(_name); }
+ Symbol* constraint_name() { return AOTCompressedPointers::decode_not_null(_name); }
};
struct RTEnumKlassStaticFields {
int _num;
@@ -76,8 +79,8 @@ class RunTimeClassInfo {
};
private:
- u4 _klass_offset;
- u4 _nest_host_offset;
+ narrowPtr _klass;
+ narrowPtr _nest_host;
int _num_verifier_constraints;
int _num_loader_constraints;
@@ -185,7 +188,7 @@ public:
InstanceKlass* nest_host() {
assert(!ArchiveBuilder::is_active(), "not called when dumping archive");
- return ArchiveUtils::offset_to_archived_address_or_null(_nest_host_offset);
+ return AOTCompressedPointers::decode(_nest_host); // may be null
}
RTLoaderConstraint* loader_constraints() {
diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp
index 19e05784f4d..946fea5346f 100644
--- a/src/hotspot/share/ci/ciField.cpp
+++ b/src/hotspot/share/ci/ciField.cpp
@@ -213,9 +213,13 @@ ciField::ciField(fieldDescriptor *fd) :
"bootstrap classes must not create & cache unshared fields");
}
-static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
+static bool trust_final_nonstatic_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
+ if (holder->trust_final_fields()) {
+ // Explicit opt-in from system classes
+ return true;
+ }
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
- // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
- // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
- if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
- return true;
- }
return TrustFinalNonStaticFields;
}
@@ -263,7 +259,7 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// An instance field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in
// java.lang.invoke and sun.invoke packages and subpackages).
- _is_constant = is_stable_field || trust_final_non_static_fields(_holder);
+ _is_constant = is_stable_field || trust_final_nonstatic_fields(_holder);
}
} else {
// For CallSite objects treat the target field as a compile time constant.
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 64b9acf9146..6243258acd9 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
+ _trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily
@@ -391,7 +392,7 @@ bool ciInstanceKlass::contains_field_offset(int offset) {
return get_instanceKlass()->contains_field_offset(offset);
}
-ciField* ciInstanceKlass::get_non_static_field_by_offset(const int field_offset) {
+ciField* ciInstanceKlass::get_nonstatic_field_by_offset(const int field_offset) {
for (int i = 0, len = nof_nonstatic_fields(); i < len; i++) {
ciField* field = _nonstatic_fields->at(i);
int field_off = field->offset_in_bytes();
@@ -405,7 +406,7 @@ ciField* ciInstanceKlass::get_non_static_field_by_offset(const int field_offset)
// ciInstanceKlass::get_field_by_offset
ciField* ciInstanceKlass::get_field_by_offset(int field_offset, bool is_static) {
if (!is_static) {
- return get_non_static_field_by_offset(field_offset);
+ return get_nonstatic_field_by_offset(field_offset);
}
VM_ENTRY_MARK;
InstanceKlass* k = get_instanceKlass();
@@ -436,7 +437,7 @@ ciField* ciInstanceKlass::get_field_by_name(ciSymbol* name, ciSymbol* signature,
// except this does not require allocating memory for a new ciField
BasicType ciInstanceKlass::get_field_type_by_offset(const int field_offset, const bool is_static) {
if (!is_static) {
- ciField* field = get_non_static_field_by_offset(field_offset);
+ ciField* field = get_nonstatic_field_by_offset(field_offset);
return field != nullptr ? field->layout_type() : T_ILLEGAL;
}
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index a1b2d8dd12d..a84c63981c9 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
+ bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@@ -82,7 +83,7 @@ private:
bool compute_injected_fields_helper();
void compute_transitive_interfaces();
- ciField* get_non_static_field_by_offset(int field_offset);
+ ciField* get_nonstatic_field_by_offset(int field_offset);
protected:
ciInstanceKlass(Klass* k);
@@ -207,6 +208,10 @@ public:
return _is_record;
}
+ bool trust_final_fields() const {
+ return _trust_final_fields;
+ }
+
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
diff --git a/src/hotspot/share/ci/ciUtilities.cpp b/src/hotspot/share/ci/ciUtilities.cpp
index 0c5b4d9824f..011fa049275 100644
--- a/src/hotspot/share/ci/ciUtilities.cpp
+++ b/src/hotspot/share/ci/ciUtilities.cpp
@@ -42,10 +42,7 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
-CardTable::CardValue* ci_card_table_address() {
- BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast(bs);
- CardTable* ct = ctbs->card_table();
- assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");
- return ct->byte_map_base();
+CardTable::CardValue* ci_card_table_address_const() {
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ return ctbs->card_table_base_const();
}
diff --git a/src/hotspot/share/ci/ciUtilities.hpp b/src/hotspot/share/ci/ciUtilities.hpp
index 75dbb03adf4..3333972d57d 100644
--- a/src/hotspot/share/ci/ciUtilities.hpp
+++ b/src/hotspot/share/ci/ciUtilities.hpp
@@ -51,9 +51,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
-CardTable::CardValue* ci_card_table_address();
+CardTable::CardValue* ci_card_table_address_const();
template T ci_card_table_address_as() {
- return reinterpret_cast(ci_card_table_address());
+ return reinterpret_cast(ci_card_table_address_const());
}
#endif // SHARE_CI_CIUTILITIES_HPP
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index c9d9d3632b5..c1f00cbe536 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
+ _jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@@ -1016,7 +1017,8 @@ public:
};
-static int skip_annotation_value(const u1*, int, int); // fwd decl
+static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth); // fwd decl
+static const int max_recursion_depth = 5;
// Safely increment index by val if does not pass limit
#define SAFE_ADD(index, limit, val) \
@@ -1024,23 +1026,29 @@ if (index >= limit - val) return limit; \
index += val;
// Skip an annotation. Return >=limit if there is any problem.
-static int skip_annotation(const u1* buffer, int limit, int index) {
+static int skip_annotation(const u1* buffer, int limit, int index, int recursion_depth = 0) {
assert(buffer != nullptr, "invariant");
+ if (recursion_depth > max_recursion_depth) {
+ return limit;
+ }
// annotation := atype:u2 do(nmem:u2) {member:u2 value}
// value := switch (tag:u1) { ... }
SAFE_ADD(index, limit, 4); // skip atype and read nmem
int nmem = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nmem >= 0 && index < limit) {
SAFE_ADD(index, limit, 2); // skip member
- index = skip_annotation_value(buffer, limit, index);
+ index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
return index;
}
// Skip an annotation value. Return >=limit if there is any problem.
-static int skip_annotation_value(const u1* buffer, int limit, int index) {
+static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth) {
assert(buffer != nullptr, "invariant");
+ if (recursion_depth > max_recursion_depth) {
+ return limit;
+ }
// value := switch (tag:u1) {
// case B, C, I, S, Z, D, F, J, c: con:u2;
// case e: e_class:u2 e_name:u2;
@@ -1072,12 +1080,12 @@ static int skip_annotation_value(const u1* buffer, int limit, int index) {
SAFE_ADD(index, limit, 2); // read nval
int nval = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nval >= 0 && index < limit) {
- index = skip_annotation_value(buffer, limit, index);
+ index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
}
break;
case '@':
- index = skip_annotation(buffer, limit, index);
+ index = skip_annotation(buffer, limit, index, recursion_depth + 1);
break;
default:
return limit; // bad tag byte
@@ -1878,6 +1886,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
+ case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
+ if (_location != _in_class) break; // only allow for classes
+ if (!privileged) break; // only allow in privileged code
+ return _jdk_internal_vm_annotation_TrustFinalFields;
+ }
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@@ -1992,6 +2005,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
+ if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
+ ik->set_trust_final_fields(true);
+ }
}
#define MAX_ARGS_SIZE 255
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index d9a63cd154b..eced83577cb 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -127,6 +127,7 @@ PerfCounter* ClassLoader::_perf_ik_link_methods_count = nullptr;
PerfCounter* ClassLoader::_perf_method_adapters_count = nullptr;
PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = nullptr;
PerfCounter* ClassLoader::_perf_secondary_hash_time = nullptr;
+PerfCounter* ClassLoader::_perf_change_wx_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_indy_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_invokehandle_time = nullptr;
@@ -1370,6 +1371,7 @@ void ClassLoader::initialize(TRAPS) {
NEWPERFBYTECOUNTER(_perf_sys_classfile_bytes_read, SUN_CLS, "sysClassBytes");
NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS, "unsafeDefineClassCalls");
NEWPERFTICKCOUNTER(_perf_secondary_hash_time, SUN_CLS, "secondarySuperHashTime");
+ NEWPERFTICKCOUNTER(_perf_change_wx_time, SUN_CLS, "changeWXTime");
if (log_is_enabled(Info, perf, class, link)) {
NEWPERFTICKCOUNTER(_perf_ik_link_methods_time, SUN_CLS, "linkMethodsTime");
@@ -1418,6 +1420,10 @@ char* ClassLoader::lookup_vm_options() {
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
JImage_file =(*JImageOpen)(modules_path, &error);
if (JImage_file == nullptr) {
+ if (Arguments::has_jimage()) {
+ // The modules file exists but is unreadable or corrupt
+ vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
+ }
return nullptr;
}
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index afb0a581dcc..a935d3027ac 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -184,6 +184,7 @@ class ClassLoader: AllStatic {
// Count the time taken to hash the scondary superclass arrays.
static PerfCounter* _perf_secondary_hash_time;
+ static PerfCounter* _perf_change_wx_time;
// The boot class path consists of 3 ordered pieces:
// 1. the module/path pairs specified to --patch-module
@@ -268,6 +269,9 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_secondary_hash_time() {
return _perf_secondary_hash_time;
}
+ static PerfCounter* perf_change_wx_time() {
+ return _perf_change_wx_time;
+ }
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
static PerfCounter* perf_app_classload_selftime() { return _perf_app_classload_selftime; }
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.cpp b/src/hotspot/share/classfile/classLoaderDataShared.cpp
index 7a7743edd03..d415fe64bac 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
- void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
- void allocate(ClassLoaderData* loader_data);
- void init_archived_entries(ClassLoaderData* loader_data);
+ void iterate_roots(MetaspaceClosure* closure);
+ void build_tables(ClassLoaderData* loader_data, TRAPS);
+ void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
-void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
+void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->iterate_symbols(closure);
- loader_data->modules() ->iterate_symbols(closure);
- loader_data->unnamed_module()->iterate_symbols(closure);
- }
+ it->push(&_packages);
+ it->push(&_modules);
+ it->push(&_unnamed_module);
}
-void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
+void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
- _packages = loader_data->packages()->allocate_archived_entries();
- _modules = loader_data->modules() ->allocate_archived_entries();
- _unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
+ _packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
+ _modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
+ _unnamed_module = loader_data->unnamed_module();
}
}
-void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
- assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->init_archived_entries(_packages);
- loader_data->modules() ->init_archived_entries(_modules);
- _unnamed_module->init_as_archived_entry();
+void ArchivedClassLoaderData::remove_unshareable_info() {
+ if (_packages != nullptr) {
+ _packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
+ for (int i = 0; i < _packages->length(); i++) {
+ _packages->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_modules != nullptr) {
+ _modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
+ for (int i = 0; i < _modules->length(); i++) {
+ _modules->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_unnamed_module != nullptr) {
+ _unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
+ _unnamed_module->remove_unshareable_info();
}
}
@@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
-#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
-#endif
}
static ClassLoaderData* null_class_loader_data() {
@@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
-void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
+void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
- _archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
- _archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
+ _archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
+ _archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
+ _archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}
-void ClassLoaderDataShared::allocate_archived_tables() {
+void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.allocate (null_class_loader_data());
- _archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
- _archived_system_loader_data.allocate (java_system_loader_data_or_null());
+ _archived_boot_loader_data.iterate_roots(it);
+ _archived_platform_loader_data.iterate_roots(it);
+ _archived_system_loader_data.iterate_roots(it);
}
-void ClassLoaderDataShared::init_archived_tables() {
+void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
+ _archived_boot_loader_data.remove_unshareable_info();
+ _archived_platform_loader_data.remove_unshareable_info();
+ _archived_system_loader_data.remove_unshareable_info();
- _archived_boot_loader_data.init_archived_entries (null_class_loader_data());
- _archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
- _archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
-
- _archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
+ _archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
-
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.hpp b/src/hotspot/share/classfile/classLoaderDataShared.hpp
index 39d0a89418f..2cf37310e50 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.hpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
+ static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+ static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
+ static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
- static void allocate_archived_tables();
- static void iterate_symbols(MetaspaceClosure* closure);
- static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();
diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp
index 6808ae7bb8f..de67971c403 100644
--- a/src/hotspot/share/classfile/compactHashtable.cpp
+++ b/src/hotspot/share/classfile/compactHashtable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -96,14 +96,16 @@ void CompactHashtableWriter::allocate_table() {
"Too many entries.");
}
- _compact_buckets = ArchiveBuilder::new_ro_array(_num_buckets + 1);
- _compact_entries = ArchiveBuilder::new_ro_array(entries_space);
+ _num_compact_buckets = checked_cast(_num_buckets + 1); // extra slot for TABLEEND_BUCKET_TYPE
+ _num_compact_entries = checked_cast(entries_space);
+ _compact_buckets = (u4*)ArchiveBuilder::ro_region_alloc(_num_compact_buckets * sizeof(u4));
+ _compact_entries = (u4*)ArchiveBuilder::ro_region_alloc(_num_compact_entries * sizeof(u4));
_stats->bucket_count = _num_buckets;
- _stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,
+ _stats->bucket_bytes = align_up(checked_cast(_num_compact_buckets * sizeof(u4)),
SharedSpaceObjectAlignment);
_stats->hashentry_count = _num_entries_written;
- _stats->hashentry_bytes = align_up(_compact_entries->size() * BytesPerWord,
+ _stats->hashentry_bytes = align_up(checked_cast(_num_compact_entries * sizeof(u4)),
SharedSpaceObjectAlignment);
}
@@ -114,21 +116,21 @@ void CompactHashtableWriter::dump_table(NumberSeq* summary) {
GrowableArray* bucket = _buckets[index];
int bucket_size = bucket->length();
if (bucket_size == 1) {
- _compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
+ compact_buckets_set(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
Entry ent = bucket->at(0);
// bucket with one entry is value_only and only has the encoded_value
- _compact_entries->at_put(offset++, ent.encoded_value());
+ compact_entries_set(offset++, ent.encoded_value());
_num_value_only_buckets++;
} else {
// regular bucket, it could contain zero or more than one entry,
// each entry is a pair
- _compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
+ compact_buckets_set(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
for (int i=0; iat(i);
- _compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
- _compact_entries->at_put(offset++, ent.encoded_value()); // write entry encoded_value
+ compact_entries_set(offset++, u4(ent.hash())); // write entry hash
+ compact_entries_set(offset++, ent.encoded_value()); // write entry encoded_value
}
if (bucket_size == 0) {
_num_empty_buckets++;
@@ -140,10 +142,19 @@ void CompactHashtableWriter::dump_table(NumberSeq* summary) {
}
// Mark the end of the buckets
- _compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
- assert(offset == (u4)_compact_entries->length(), "sanity");
+ compact_buckets_set(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
+ assert(offset == checked_cast(_num_compact_entries), "sanity");
}
+void CompactHashtableWriter::compact_buckets_set(u4 index, u4 value) {
+ precond(index < _num_compact_buckets);
+ _compact_buckets[index] = value;
+}
+
+void CompactHashtableWriter::compact_entries_set(u4 index, u4 value) {
+ precond(index < _num_compact_entries);
+ _compact_entries[index] = value;
+}
// Write the compact table
void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table_name) {
@@ -154,7 +165,7 @@ void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table
int table_bytes = _stats->bucket_bytes + _stats->hashentry_bytes;
address base_address = address(SharedBaseAddress);
cht->init(base_address, _num_entries_written, _num_buckets,
- _compact_buckets->data(), _compact_entries->data());
+ _compact_buckets, _compact_entries);
LogMessage(aot, hashtables) msg;
if (msg.is_info()) {
diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp
index 944fb876521..81f2951289d 100644
--- a/src/hotspot/share/classfile/compactHashtable.hpp
+++ b/src/hotspot/share/classfile/compactHashtable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
#define SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/cds_globals.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
@@ -114,8 +115,13 @@ private:
int _num_other_buckets;
GrowableArray** _buckets;
CompactHashtableStats* _stats;
- Array* _compact_buckets;
- Array* _compact_entries;
+ u4* _compact_buckets;
+ size_t _num_compact_buckets;
+ u4* _compact_entries;
+ size_t _num_compact_entries;
+
+ void compact_buckets_set(u4 index, u4 value);
+ void compact_entries_set(u4 index, u4 value);
public:
// This is called at dump-time only
@@ -123,6 +129,9 @@ public:
~CompactHashtableWriter();
void add(unsigned int hash, u4 encoded_value);
+ void add(unsigned int hash, AOTCompressedPointers::narrowPtr encoded_value) {
+ add(hash, cast_to_u4(encoded_value));
+ }
void dump(SimpleCompactHashtable *cht, const char* table_name);
private:
@@ -371,11 +380,11 @@ public:
//
// OffsetCompactHashtable -- This is used to store many types of objects
// in the CDS archive. On 64-bit platforms, we save space by using a 32-bit
-// offset from the CDS base address.
+// narrowPtr from the CDS base address.
template
-inline V read_value_from_compact_hashtable(address base_address, u4 offset) {
- return (V)(base_address + offset);
+inline V read_value_from_compact_hashtable(address base_address, u4 narrowp) {
+ return AOTCompressedPointers::decode_not_null