Merge branch 'openjdk:master' into JDK-8373118

Doug Lea 2026-02-04 09:32:34 -05:00 committed by GitHub
commit 7ae93ed72b
1687 changed files with 64533 additions and 40947 deletions

View File

@ -1,7 +1,7 @@
#!/bin/bash -f
#
# Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,13 @@
# questions.
#
# Script to update the Copyright YEAR range in Mercurial & Git sources.
# Script to update the Copyright YEAR range in Git sources.
# (Originally from xdono, Thanks!)
# To update Copyright years for changes in a specific branch,
# you use a command along these lines:
# $ git diff upstream/master...<branch-name> | lsdiff | cut -d '/' -f 2- | bash bin/update_copyright_year.sh -m -
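# The new -m option can also take the file list from a plain file rather than
# stdin. A hedged sketch (the temporary file path is only an example; whether
# plain `git diff --name-only` output is an acceptable substitute for the
# lsdiff pipeline should be double-checked):
# $ git diff --name-only upstream/master...<branch-name> > /tmp/changed-files.txt
# $ bash bin/update_copyright_year.sh -m /tmp/changed-files.txt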
#------------------------------------------------------------
copyright="Copyright"
copyright_symbol="(c)"
@ -47,7 +51,7 @@ rm -f -r ${tmp}
mkdir -p ${tmp}
total=0
usage="Usage: `basename "$0"` [-c company] [-y year] [-h|f]"
usage="Usage: `basename "$0"` [-c company] [-y year] [-m file] [-h|f]"
Help()
{
# Display Help
@ -65,15 +69,18 @@ Help()
echo "-b Specifies the base reference for change set lookup."
echo "-f Updates the copyright for all change sets in a given year,"
echo " as specified by -y. Overrides -b flag."
echo "-m Read the list of modified files from the given file,"
echo " use - to read from stdin"
echo "-h Print this help."
echo
}
full_year=false
base_reference=master
modified_files_origin="";
# Process options
while getopts "b:c:fhy:" option; do
while getopts "b:c:fhm:y:" option; do
case $option in
b) # supplied base reference
base_reference=${OPTARG}
@ -91,6 +98,9 @@ while getopts "b:c:fhy:" option; do
y) # supplied company year
year=${OPTARG}
;;
m) # modified files will be read from the given origin
modified_files_origin="${OPTARG}"
;;
\?) # illegal option
echo "$usage"
exit 1
@ -110,18 +120,10 @@ git status &> /dev/null && git_found=true
if [ "$git_found" != "true" ]; then
echo "Error: Please execute script from within a JDK git repository."
exit 1
else
echo "Using Git version control system"
vcs_status=(git ls-files -m)
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
fi
echo "Using Git version control system"
# Return true if it makes sense to edit this file
saneFileToCheck()
{
@ -168,6 +170,25 @@ updateFile() # file
echo "${changed}"
}
# Update the copyright year on files listed on stdin
updateFiles() # stdin: list of files to update
{
count=0
fcount=0
while read i; do
fcount=`expr ${fcount} '+' 1`
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
}
# Update the copyright year on all files changed by this changeset
updateChangesetFiles() # changeset
{
@ -178,18 +199,7 @@ updateChangesetFiles() # changeset
| ${awk} -F' ' '{for(i=1;i<=NF;i++)print $i}' \
> ${files}
if [ -f "${files}" -a -s "${files}" ] ; then
fcount=`cat ${files}| wc -l`
for i in `cat ${files}` ; do
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
cat ${files} | updateFiles
else
printf " ERROR: No files changed in the changeset? Must be a mistake.\n"
set -x
@ -204,67 +214,80 @@ updateChangesetFiles() # changeset
}
# Check if repository is clean
vcs_status=(git ls-files -m)
previous=`"${vcs_status[@]}"|wc -l`
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contains previously edited working set files."
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
# Get all changesets this year
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
if [ "x$modified_files_origin" != "x" ]; then
cat $modified_files_origin | updateFiles
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
# Get all changesets this year
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
fi
# Cleanup

View File

@ -1385,10 +1385,9 @@ dpkg-deb -x /tmp/libasound2-dev_1.0.25-4_armhf.deb .</code></pre></li>
can specify it by <code>--with-alsa</code>.</p></li>
</ul>
<h4 id="x11-1">X11</h4>
<p>You will need X11 libraries suitable for your <em>target</em> system.
In most cases, using Debian's pre-built libraries work fine.</p>
<p>Note that X11 is needed even if you only want to build a headless
JDK.</p>
<p>When not building a headless JDK, you will need X11 libraries
suitable for your <em>target</em> system. In most cases, Debian's
pre-built libraries work fine.</p>
<ul>
<li><p>Go to <a href="https://www.debian.org/distrib/packages">Debian
Package Search</a>, search for the following packages for your

View File

@ -1178,10 +1178,8 @@ Note that alsa is needed even if you only want to build a headless JDK.
#### X11
You will need X11 libraries suitable for your *target* system. In most cases,
using Debian's pre-built libraries work fine.
Note that X11 is needed even if you only want to build a headless JDK.
When not building a headless JDK, you will need X11 libraries suitable for your
*target* system. In most cases, Debian's pre-built libraries work fine.
* Go to [Debian Package Search](https://www.debian.org/distrib/packages),
search for the following packages for your *target* system, and download them

View File

@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
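<p>Once the library is installed and the <code>sctp</code> kernel module is
loaded, the SCTP tests run like any other jtreg tests. A hedged example
follows; the test path is an assumption about where the SCTP tests live in
the source tree:</p>
<pre><code>make test TEST=&quot;jtreg:com/sun/nio/sctp&quot;</code></pre>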
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
### SCTP Tests
The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.
For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:
```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```
For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:
```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```
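Once the library is installed and the `sctp` kernel module is loaded, the SCTP
tests run like any other jtreg tests. A hedged example (the test path is an
assumption about where the SCTP tests live in the source tree):
```
make test TEST="jtreg:com/sun/nio/sctp"
```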
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -972,6 +972,10 @@ define SetupRunJtregTestBody
JTREG_AUTO_PROBLEM_LISTS += ProblemList-enable-preview.txt
endif
ifneq ($$(findstring -XX:+UseCompactObjectHeaders, $$(JTREG_ALL_OPTIONS)), )
JTREG_AUTO_PROBLEM_LISTS += ProblemList-coh.txt
endif
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
# Accept both absolute paths as well as relative to the current test root.

View File

@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
CHECK_AVAILABLE: [
if test x$TOOLCHAIN_TYPE = xmicrosoft; then
AVAILABLE=false
fi
],
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)
if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
DEFAULT_DESC: [toolchain default],
IF_AUTO: [
RESULT=""
])
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
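A minimal sketch of how the reworked option would be passed to configure,
assuming the flag generated by UTIL_ARG_WITH follows the usual
--with-<name> convention:
$ bash configure --with-native-debug-symbols-level=3
$ bash configure --with-native-debug-symbols-level=auto   # toolchain default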

View File

@ -42,12 +42,12 @@ m4_include([lib-tests.m4])
AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES],
[
# Check if X11 is needed
if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
# No X11 support on windows or macosx
if test "x$OPENJDK_TARGET_OS" = xwindows ||
test "x$OPENJDK_TARGET_OS" = xmacosx ||
test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
NEEDS_LIB_X11=false
else
# All other instances need X11, even if building headless only, libawt still
# needs X11 headers.
# All other instances need X11 for libawt.
NEEDS_LIB_X11=true
fi
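With this change, a headless-only build skips the X11 dependency check
entirely. A hedged example of such a configure invocation (the flag name is
assumed to correspond to ENABLE_HEADLESS_ONLY):
$ bash configure --enable-headless-only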

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
zero-as-null-pointer-constant, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
* <pre>
* &commat;jls section-number description
* &commat;jls chapter.section description
* &commat;jls preview-feature-chapter.section description
* </pre>
*
* For example:
*
* <pre>
* &commat;jls 3.4 Line Terminators
* &commat;jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
* </pre>
*
* will produce the following HTML, depending on the file containing
@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* <pre>{@code
* <dt>See <i>Java Language Specification</i>:
* <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
* <dd><a href="../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
* 5.7.1 Exact Testing Conversions</a><sup class="preview-mark">
* <a href="../../specs/jls/jls-1.html#jls-1.5.1">PREVIEW</a></sup>
* }</pre>
*
* Copies of JLS and JVMS are expected to have been placed in the {@code specs}
* folder. These documents are not included in open-source repositories.
* In inline tags (note that you need to add the JLS/JVMS prefix manually):
* <pre>
* JLS {&commat;jls 3.4}
* </pre>
*
* produces (note the section sign and no trailing dot):
* <pre>
* JLS <a href="../../specs/jls/jls-3.html#jls-3.4">§3.4</a>
* </pre>
*
* Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
* been placed in the {@code specs} folder. These documents are not included
* in open-source repositories.
*/
public class JSpec implements Taglet {
@ -87,9 +103,9 @@ public class JSpec implements Taglet {
}
}
private String tagName;
private String specTitle;
private String idPrefix;
private final String tagName;
private final String specTitle;
private final String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@ -98,7 +114,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
/**
* Returns the set of locations in which the tag may be used.
@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
// preview-feature-4.6 is preview-feature-, 4, .6
String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section);
String url = preview == null ?
String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section) :
String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section, preview);
var literal = expand(contents).trim();
var prefix = (preview == null ? "" : preview) + chapter + section;
if (literal.startsWith(prefix)) {
var hasFullTitle = literal.length() > prefix.length();
if (hasFullTitle) {
// Drop the preview identifier
literal = chapter + section + literal.substring(prefix.length());
} else {
// No section sign if the tag refers to a chapter, like {@jvms 4}
String sectionSign = section.isEmpty() ? "" : "§";
// Change whole text to "§chapter.x" in inline tags.
literal = sectionSign + chapter + section;
}
}
sb.append("<a href=\"")
.append(url)
.append("\">")
.append(expand(contents))
.append(literal)
.append("</a>");
if (preview != null) {
// Add PREVIEW superscript that links to JLS/JVMS 1.5.1
// "Restrictions on the Use of Preview Features"
// Similar to how APIs link to the Preview info box warning
var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, "1", ".5.1");
sb.append("<sup class=\"preview-mark\"><a href=\"")
.append(sectionLink)
.append("\">PREVIEW</a></sup>");
}
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("<br>");
}

View File

@ -88,6 +88,10 @@ LIBAWT_EXTRA_HEADER_DIRS := \
LIBAWT_CFLAGS := -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES -DMLIB_NO_LIBSUNMATH
ifeq ($(ENABLE_HEADLESS_ONLY), true)
LIBAWT_CFLAGS += -DHEADLESS
endif
ifeq ($(call isTargetOs, windows), true)
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE -DMLIB_OS64BIT
LIBAWT_RCFLAGS ?= -I$(TOPDIR)/src/java.base/windows/native/launcher/icons
@ -167,11 +171,18 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
LIBAWT_HEADLESS_EXCLUDE_FILES := \
GLXGraphicsConfig.c \
GLXSurfaceData.c \
X11PMBlitLoops.c \
X11Renderer.c \
X11SurfaceData.c \
#
LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
$(LIBAWT_DEFAULT_HEADER_DIRS) \
common/awt/debug \
common/font \
common/java2d/opengl \
java.base:libjvm \
#
@ -191,7 +202,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
EXCLUDES := medialib, \
EXCLUDES := medialib opengl, \
EXCLUDE_FILES := $(LIBAWT_HEADLESS_EXCLUDE_FILES), \
ONLY_EXPORTED := $(LIBAWT_HEADLESS_ONLY_EXPORTED), \
OPTIMIZATION := LOW, \
CFLAGS := -DHEADLESS=true $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) \

View File

@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))
TARGETS += $(BUILD_LIBJAAS)
ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
OPTIMIZATION := LOW, \
EXTRA_HEADER_DIRS := java.base:libjava, \
LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
))
TARGETS += $(BUILD_LIBJAAS)
endif
################################################################################

View File

@ -1,264 +0,0 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This source code is provided to illustrate the usage of a given feature
* or technique and has been deliberately simplified. Additional steps
* required for a production-quality application, such as security checks,
* input validation and proper error handling, might not be present in
* this sample code.
*/
import java.util.EventObject;
import java.util.List;
import javax.swing.JTable;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableCellEditor;
import javax.swing.table.TableCellRenderer;
import javax.swing.table.TableColumn;
/**
* The OldJTable is an unsupported class containing some methods that were
* deleted from the JTable between releases 0.6 and 0.7
*/
@SuppressWarnings("serial")
public class OldJTable extends JTable
{
/*
* A new convenience method returning the index of the column in the
* co-ordinate space of the view.
*/
public int getColumnIndex(Object identifier) {
return getColumnModel().getColumnIndex(identifier);
}
//
// Methods deleted from the JTable because they only work with the
// DefaultTableModel.
//
public TableColumn addColumn(Object columnIdentifier, int width) {
return addColumn(columnIdentifier, width, null, null, null);
}
public TableColumn addColumn(Object columnIdentifier, List<?> columnData) {
return addColumn(columnIdentifier, -1, null, null, columnData);
}
// Override the new JTable implementation - it will not add a column to the
// DefaultTableModel.
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
return addColumn(columnIdentifier, width, renderer, editor, null);
}
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor, List<?> columnData) {
checkDefaultTableModel();
// Set up the model side first
DefaultTableModel m = (DefaultTableModel)getModel();
m.addColumn(columnIdentifier, columnData.toArray());
// The column will have been added to the end, so the index of the
// column in the model is the last element.
TableColumn newColumn = new TableColumn(
m.getColumnCount()-1, width, renderer, editor);
super.addColumn(newColumn);
return newColumn;
}
// Not possible to make this work the same way ... change it so that
// it does not delete columns from the model.
public void removeColumn(Object columnIdentifier) {
super.removeColumn(getColumn(columnIdentifier));
}
public void addRow(Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData);
}
public void addRow(List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData.toArray());
}
public void removeRow(int rowIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).removeRow(rowIndex);
}
public void moveRow(int startIndex, int endIndex, int toIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
}
public void insertRow(int rowIndex, Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
}
public void insertRow(int rowIndex, List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
}
public void setNumRows(int newSize) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setNumRows(newSize);
}
public void setDataVector(Object[][] newData, List<?> columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(
newData, columnIds.toArray());
}
public void setDataVector(Object[][] newData, Object[] columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
}
protected void checkDefaultTableModel() {
if(!(dataModel instanceof DefaultTableModel))
throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
}
//
// Methods removed from JTable in the move from identifiers to ints.
//
public Object getValueAt(Object columnIdentifier, int rowIndex) {
return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
}
public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
}
public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
}
public boolean editColumnRow(Object identifier, int row) {
return super.editCellAt(row, getColumnIndex(identifier));
}
public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
moveColumn(getColumnIndex(columnIdentifier),
getColumnIndex(targetColumnIdentifier));
}
public boolean isColumnSelected(Object identifier) {
return isColumnSelected(getColumnIndex(identifier));
}
public TableColumn addColumn(int modelColumn, int width) {
return addColumn(modelColumn, width, null, null);
}
public TableColumn addColumn(int modelColumn) {
return addColumn(modelColumn, 75, null, null);
}
/**
* Creates a new column with <I>modelColumn</I>, <I>width</I>,
* <I>renderer</I>, and <I>editor</I> and adds it to the end of
* the JTable's array of columns. This method also retrieves the
* name of the column using the model's <I>getColumnName(modelColumn)</I>
* method, and sets both the header value and the identifier
* for this TableColumn accordingly.
* <p>
* The <I>modelColumn</I> is the index of the column in the model which
* will supply the data for this column in the table. This, like the
* <I>columnIdentifier</I> in previous releases, does not change as the
* columns are moved in the view.
* <p>
* For the rest of the JTable API, and all of its associated classes,
* columns are referred to in the co-ordinate system of the view, the
* index of the column in the model is kept inside the TableColumn
* and is used only to retrieve the information from the appropriate
* column in the model.
* <p>
*
* @param modelColumn The index of the column in the model
* @param width The new column's width. Or -1 to use
* the default width
* @param renderer The renderer used with the new column.
* Or null to use the default renderer.
* @param editor The editor used with the new column.
* Or null to use the default editor.
*/
public TableColumn addColumn(int modelColumn, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
TableColumn newColumn = new TableColumn(
modelColumn, width, renderer, editor);
addColumn(newColumn);
return newColumn;
}
//
// Methods that had their arguments switched.
//
// These won't work with the new table package.
/*
public Object getValueAt(int columnIndex, int rowIndex) {
return super.getValueAt(rowIndex, columnIndex);
}
public boolean isCellEditable(int columnIndex, int rowIndex) {
return super.isCellEditable(rowIndex, columnIndex);
}
public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
super.setValueAt(aValue, rowIndex, columnIndex);
}
*/
public boolean editColumnRow(int columnIndex, int rowIndex) {
return super.editCellAt(rowIndex, columnIndex);
}
public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
return super.editCellAt(rowIndex, columnIndex, e);
}
} // End Of Class OldJTable

View File

@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;

View File

@ -1218,43 +1218,11 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv) {
// Given a profile data offset, generate an Address which points to
// the corresponding slot in mdo->data().
// Clobbers rscratch2.
auto slot_at = [=](ByteSize offset) -> Address {
return __ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, offset),
LogBytesPerWord);
};
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
__ cmp(recv, rscratch1);
__ br(Assembler::NE, next_test);
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
DataLayout::counter_increment);
__ b(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
__ ldr(rscratch1, recv_addr);
__ cbnz(rscratch1, next_test);
__ str(recv, recv_addr);
__ mov(rscratch1, DataLayout::counter_increment);
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
__ b(*update_done);
__ bind(next_test);
}
int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
__ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@ -1316,14 +1284,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(obj, *obj_is_null);
}
@ -1430,13 +1393,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(value, done);
}
@ -2540,13 +2499,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
// dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@ -2554,36 +2509,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == nullptr) {
__ mov_metadata(rscratch1, known_klass->constant_encoding());
Address recv_addr =
__ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
LogBytesPerWord);
__ str(rscratch1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
// Receiver type is not found in profile data.
// Fall back to runtime helper to handle the rest at runtime.
__ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);

View File

@ -50,9 +50,8 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
void type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);

View File

@ -85,26 +85,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -358,20 +348,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@ -394,13 +384,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@ -409,20 +399,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
rscratch1 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}

View File

@ -40,23 +40,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -240,15 +240,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
// r2, r5
// r2
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
profile_typecheck(r2, Rsub_klass); // blows r2
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@ -991,7 +990,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1009,7 +1007,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@ -1018,131 +1016,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2, int start_row,
Label& done) {
if (TypeProfileWidth == 0) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} else {
record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
// Test this row for both the item and for null.
// Take any of three different outcomes:
// 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
for (int row = start_row; row <= last_row; row++) {
Label next_test;
bool test_for_null_also = (row == start_row);
// See if the item is item[n].
int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test);
// (Reg2 now contains the item from the CallData.)
// The item is item[n]. Increment count[n].
int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
b(done);
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on item[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
cbz(reg2, found_null);
// Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(done);
bind(found_null);
break;
}
// Since null is rare, make it be the branch-taken case.
cbz(reg2, found_null);
// Put all the "Case 3" tests here.
record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn);
// Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot.
bind(found_null);
}
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(item_count_offset_fn(start_row));
mov(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) {
b(done);
}
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp, Register reg2) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
bind (done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@ -1200,7 +1073,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1213,7 +1086,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);

View File

@ -273,15 +273,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done);
void record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@ -295,11 +286,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,

View File

@ -2118,6 +2118,161 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
// Handle the receiver type profile update given the "recv" klass.
//
// Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in RD, updates
// the polymorphic counter.
//
// This code is expected to run from either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
//
void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
assert_different_registers(recv, mdp, rscratch1, rscratch2);
int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
int poly_count_offset = in_bytes(CounterData::count_offset());
int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
// Adjust for MDP offsets.
base_receiver_offset += mdp_offset;
end_receiver_offset += mdp_offset;
poly_count_offset += mdp_offset;
#ifdef ASSERT
// We are about to walk the MDO slots without asking for offsets.
// Check that our math hits all the right spots.
for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
int offset = base_receiver_offset + receiver_step*c;
int count_offset = offset + receiver_to_count_step;
assert(offset == real_recv_offset, "receiver slot math");
assert(count_offset == real_count_offset, "receiver count math");
}
int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
assert(poly_count_offset == real_poly_count_offset, "poly counter math");
#endif
// Corner case: no profile table. Increment poly counter and exit.
if (ReceiverTypeData::row_limit() == 0) {
increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
return;
}
Register offset = rscratch2;
Label L_loop_search_receiver, L_loop_search_empty;
Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
// The code here recognizes three major cases:
// A. Fastest: receiver found in the table
// B. Fast: no receiver in the table, and the table is full
// C. Slow: no receiver in the table, free slots in the table
//
// The case A performance is most important, as perfectly-behaved code would end up
// there, especially with larger TypeProfileWidth. The case B performance is
// important as well; this is where the bulk of the code would land for normally
// megamorphic cases. The case C performance is not essential; its job is to deal
// with installation races, so we optimize for code density instead. Case C needs
// to make sure that receiver rows are only claimed once. This makes sure we never
// overwrite a row for another receiver and never duplicate the receivers in the
// list, keeping the profile type-accurate.
//
// It is very tempting to handle these cases in a single loop, and claim the first slot
// without checking the rest of the table. But, profiling code should tolerate free slots
// in the table, as class unloading can clear them. After such cleanup, the receiver
// we need might be _after_ the free slot. Therefore, we need to let at least a full
// scan complete before trying to install new slots. Splitting the code into several tight
// loops also helpfully optimizes for cases A and B.
//
// This code is effectively:
//
// restart:
// // Fastest: receiver is already installed
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == recv) goto found_recv(i);
// }
//
// // Fast: no receiver, but profile is full
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == null) goto found_null(i);
// }
// goto polymorphic
//
// // Slow: try to install receiver
// found_null(i):
// CAS(&receiver(i), null, recv);
// goto restart
//
// polymorphic:
// count++;
// return
//
// found_recv(i):
// *receiver_count(i)++
//
bind(L_restart);
// Fastest: receiver is already installed
mov(offset, base_receiver_offset);
bind(L_loop_search_receiver);
ldr(rscratch1, Address(mdp, offset));
cmp(rscratch1, recv);
br(Assembler::EQ, L_found_recv);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_receiver);
// Fast: no receiver, but profile is full
mov(offset, base_receiver_offset);
bind(L_loop_search_empty);
ldr(rscratch1, Address(mdp, offset));
cbz(rscratch1, L_found_empty);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_empty);
b(L_polymorphic);
// Slow: try to install receiver
bind(L_found_empty);
// Atomically swing receiver slot: null -> recv.
//
// The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
// is used to hold the destination address. This is safe because the
// offset is no longer needed after the address is computed.
lea(rscratch2, Address(mdp, offset));
cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
/*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
// CAS success means the slot now has the receiver we want. CAS failure means
// something had claimed the slot concurrently: it can be the same receiver we want,
// or something else. Since this is a slow path, we can optimize for code density,
// and just restart the search from the beginning.
b(L_restart);
// Counter updates:
// Increment polymorphic counter instead of receiver slot.
bind(L_polymorphic);
mov(offset, poly_count_offset);
b(L_count_update);
// Found a receiver, convert its slot offset to corresponding count offset.
bind(L_found_recv);
add(offset, offset, receiver_to_count_step);
bind(L_count_update);
increment(Address(mdp, offset), DataLayout::counter_increment);
}
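// Illustrative only, not part of this change: a minimal standalone C++ sketch of the
// row-claiming flow emitted above. ProfileRow and the poly counter are hypothetical
// stand-ins for the real MDO layout; the structure mirrors cases A, B and C and the
// restart-after-CAS behavior.
#include <atomic>
#include <cstdint>
struct ProfileRow { std::atomic<void*> receiver; uint64_t count; };
void update_profile(ProfileRow* rows, int limit, void* recv, uint64_t& poly_count) {
  for (;;) {
    // Case A: receiver already installed -> bump its row counter (plain, non-atomic
    // increment, matching the assembly above).
    for (int i = 0; i < limit; i++) {
      if (rows[i].receiver.load(std::memory_order_relaxed) == recv) { rows[i].count++; return; }
    }
    // Case B: no matching receiver and no free slot -> count as polymorphic.
    int empty = -1;
    for (int i = 0; i < limit; i++) {
      if (rows[i].receiver.load(std::memory_order_relaxed) == nullptr) { empty = i; break; }
    }
    if (empty < 0) { poly_count++; return; }
    // Case C: try to claim the free slot; whether the CAS succeeds or loses a race,
    // restart the scan from the beginning, exactly like the b(L_restart) above.
    void* expected = nullptr;
    rows[empty].receiver.compare_exchange_weak(expected, recv, std::memory_order_relaxed);
  }
}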
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
@ -5606,12 +5761,11 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Strictly speaking the byte_map_base isn't an address at all, and it might
// Strictly speaking the card table base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
mov(reg, (uint64_t)byte_map_base);
mov(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {
@ -5782,6 +5936,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@ -5848,6 +6005,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);

View File

@ -1122,6 +1122,8 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
{ // Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
__ andsw(zr, rscratch1, JVM_ACC_STATIC);
__ br(Assembler::EQ, L_skip_barrier); // non-static
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(rscratch2, rmethod);
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);

View File

@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
// (parsedLength or (parsedLength - 48) must be divisible by 64.)
// we assume that parsed and condensed are allocated such that for
// n = (parsedLength + 63) / 64
// n blocks of 96 bytes of input can be processed, i.e.
// index + n * 96 <= condensed.length and
// n * 64 <= parsed.length
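// For example (an illustrative calculation only): with parsedLength = 256,
// n = (256 + 63) / 64 = 4, so the caller must ensure
// index + 4 * 96 = index + 384 <= condensed.length and 4 * 64 = 256 <= parsed.length.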
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
// parsed (short[112 or 256]) = c_rarg2
// parsedLength (112 or 256) = c_rarg3
// parsed (short[]) = c_rarg2
// parsedLength = c_rarg3
address generate_kyber12To16() {
Label L_F00, L_loop, L_end;
Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
__ cmp(parsedLength, (u1)64);
__ br(Assembler::GE, L_loop);
__ cbz(parsedLength, L_end);
// if anything is left it should be a final 72 bytes of input
// i.e. a final 48 12-bit values. so we handle this by loading
// 48 bytes into all 16B lanes of front(vin) and only 24
// bytes into the lower 8B lane of back(vin)
vs_ld3_post(vs_front(vin), __ T16B, condensed);
vs_ld3(vs_back(vin), __ T8B, condensed);
// Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
// n.b. target elements 2 and 3 of va duplicate elements 4 and
// 5 and target element 2 of vb duplicates element 4.
__ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
__ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
__ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
__ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
// This time expand just the lower 8 lanes
__ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
__ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
__ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
// shift lo byte of copy 1 of the middle stripe into the high byte
__ shl(va[2], __ T8H, va[2], 8);
__ shl(va[3], __ T8H, va[3], 8);
__ shl(vb[2], __ T8H, vb[2], 8);
// expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
// vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
// int are in bit positions [4..11].
__ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
__ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
__ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
// mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
// shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
// copy2
__ andr(va[2], __ T16B, va[2], v31);
__ andr(va[3], __ T16B, va[3], v31);
__ ushr(va[4], __ T8H, va[4], 4);
__ ushr(va[5], __ T8H, va[5], 4);
__ andr(vb[2], __ T16B, vb[2], v31);
__ ushr(vb[4], __ T8H, vb[4], 4);
// sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
// hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
// n.b. ordering ensures: i) inputs are consumed before they are
// overwritten ii) order of 16-bit results across successive
// pairs of vectors in va and then lower half of vb reflects order
// of corresponding 12-bit inputs
__ addv(va[0], __ T8H, va[0], va[2]);
__ addv(va[2], __ T8H, va[1], va[3]);
__ addv(va[1], __ T8H, va[4], va[6]);
__ addv(va[3], __ T8H, va[5], va[7]);
__ addv(vb[0], __ T8H, vb[0], vb[2]);
__ addv(vb[1], __ T8H, vb[4], vb[6]);
// store 48 results interleaved as shorts
vs_st2_post(vs_front(va), __ T8H, parsed);
vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
__ BIND(L_end);
__ cmp(parsedLength, (u1)0);
__ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
@ -3369,7 +3370,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
__ profile_virtual_call(r0, rlocals, r3);
__ profile_virtual_call(r0, rlocals);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@ -3499,7 +3500,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
__ profile_virtual_call(r3, r13, r19);
__ profile_virtual_call(r3, r13);
// Get declaring interface class from method, and itable index

View File

@ -201,16 +201,14 @@ void VM_Version::initialize() {
}
}
// Cortex A53
if (_cpu == CPU_ARM && model_is(0xd03)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A53)) {
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
}
// Cortex A73
if (_cpu == CPU_ARM && model_is(0xd09)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A73)) {
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
}
@ -220,16 +218,11 @@ void VM_Version::initialize() {
}
}
// Neoverse
// N1: 0xd0c
// N2: 0xd49
// N3: 0xd8e
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
model_is(0xd40) || model_is(0xd4f) ||
model_is(0xd8e) || model_is(0xd84))) {
if (_cpu == CPU_ARM &&
model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1,
CPU_MODEL_ARM_NEOVERSE_N2, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_N3, CPU_MODEL_ARM_NEOVERSE_V3,
CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@ -261,12 +254,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32, false);
}
// Neoverse
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM &&
(model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_V3, CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}

View File

@ -30,6 +30,8 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
#include <initializer_list>
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
@ -112,14 +114,26 @@ public:
CPU_APPLE = 'a',
};
enum Ampere_CPU_Model {
enum Ampere_CPU_Model {
CPU_MODEL_EMAG = 0x0, /* CPU implementer is CPU_AMCC */
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
};
};
enum ARM_CPU_Model {
CPU_MODEL_ARM_CORTEX_A53 = 0xd03,
CPU_MODEL_ARM_CORTEX_A73 = 0xd09,
CPU_MODEL_ARM_NEOVERSE_N1 = 0xd0c,
CPU_MODEL_ARM_NEOVERSE_V1 = 0xd40,
CPU_MODEL_ARM_NEOVERSE_N2 = 0xd49,
CPU_MODEL_ARM_NEOVERSE_V2 = 0xd4f,
CPU_MODEL_ARM_NEOVERSE_V3AE = 0xd83,
CPU_MODEL_ARM_NEOVERSE_V3 = 0xd84,
CPU_MODEL_ARM_NEOVERSE_N3 = 0xd8e,
};
#define CPU_FEATURE_FLAGS(decl) \
decl(FP, fp, 0) \
@ -181,6 +195,15 @@ enum Ampere_CPU_Model {
return _model == cpu_model || _model2 == cpu_model;
}
static bool model_is_in(std::initializer_list<int> cpu_models) {
for (const int& cpu_model : cpu_models) {
if (_model == cpu_model || _model2 == cpu_model) {
return true;
}
}
return false;
}
static bool is_zva_enabled() { return 0 <= _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {

View File

@ -67,9 +67,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_cardtable_loop, L_done;
@ -83,7 +81,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base());
__ mov_address(tmp, (address)ctbs->card_table_base_const());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@ -122,8 +120,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Load card table base address.
@ -140,7 +137,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
__ mov_address(card_table_base, (address)ct->byte_map_base());
__ mov_address(card_table_base, (address)ctbs->card_table_base_const());
}
// The 2nd part of the store check.
@ -170,8 +167,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
if(pc == 0) {
if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
if (pc == 0) {
if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
void set_data(intptr_t x, address pc = 0);
void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}

View File

@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@ -2424,6 +2427,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);

View File

@ -923,6 +923,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }

View File

@ -664,3 +664,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
// Works for single and double precision floats.
// dst = (op1 cmp(cc) op2) ? src1 : src2;
// Unordered semantics are the same as for CmpF3Node/CmpD3Node which implement the fcmpl/dcmpl bytecodes.
// Comparing unordered values has the same result as when src1 is less than src2.
// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
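// Example (illustration only): with cc encoding "<" and op1 = NaN, the compare is
// unordered and treated like op1 < op2, so dst = src1; with cc encoding ">" the same
// NaN input selects dst = src2.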
void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
// See operand cmpOp() for details.
bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
auto cmp = (Assembler::Condition)(cc & 3);
switch(cmp) {
case Assembler::Condition::equal:
// Use false_result if "unordered".
xscmpeqdp(tmp, op1, op2);
break;
case Assembler::Condition::greater:
// Use false_result if "unordered".
xscmpgtdp(tmp, op1, op2);
break;
case Assembler::Condition::less:
// Use true_result if "unordered".
xscmpgedp(tmp, op1, op2);
invert_cond = !invert_cond;
break;
default:
assert(false, "unsupported compare condition: %d", cc);
ShouldNotReachHere();
}
VectorSRegister true_result = invert_cond ? src2 : src1;
VectorSRegister false_result = invert_cond ? src1 : src2;
xxsel(dst, false_result, true_result, tmp);
}

View File

@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP

View File

@ -103,8 +103,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@ -117,7 +116,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
__ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
__ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@ -140,8 +139,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,

View File

@ -50,14 +50,14 @@
#define __ masm->
void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
__ block_comment("satb_write_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_write_barrier (shenandoahgc)");
__ block_comment("satb_barrier (shenandoahgc) {");
satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_barrier (shenandoahgc)");
}
}
@ -198,11 +198,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@ -574,13 +575,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@ -603,21 +604,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (is_reference_type(type)) {
if (ShenandoahSATBBarrier) {
satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
return;
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
// No need for post barrier if storing null
if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
store_check(masm, base, ind_or_offs, tmp1);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, base, ind_or_offs, tmp1);
}
}
@ -771,9 +784,6 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;

View File

@ -45,15 +45,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void store_check(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void card_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@ -85,10 +85,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
void satb_write_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,

View File

@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
add(R11_scratch1, R12_scratch2, R12_scratch2);
add(R11_scratch1, R11_scratch1, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif

View File

@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
ble(CR0, done);
ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
ble(CR0, done);
blt(CR0, done); // if (SP < _cont_fastpath) goto done;
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);

View File

@ -64,12 +64,10 @@
return true;
}
// Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
// Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
// fsel doesn't accept a condition register as input, so this would be slightly different.
static int float_cmove_cost() { return ConditionalMoveLimit; }
// Suppress CMOVF for Power8 because there are no fast nodes.
static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched

View File

@ -3024,7 +3024,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
// use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@ -3099,7 +3098,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
// use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@ -6618,7 +6616,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@ -7293,7 +7290,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7313,7 +7309,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7326,6 +7321,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@ -8492,7 +8551,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -8551,7 +8609,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -10262,7 +10319,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10276,7 +10332,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10439,7 +10494,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10453,7 +10507,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -11080,7 +11133,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
{ // Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_STATIC);
__ beq(CR0, L_skip_barrier); // non-static
Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = R11_scratch1;
__ load_method_holder(klass, R19_method);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;
@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.

View File

@ -88,26 +88,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -376,21 +366,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@ -413,13 +403,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@ -428,20 +418,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
t0 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}

View File

@ -41,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -5110,9 +5110,8 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
mv(reg, (uint64_t)byte_map_base);
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
mv(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
return UseRVV;
return UseRVV && size > 0;
}
// ---------------------------------------------------------------------------
@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
{ // Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
__ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
__ beqz(t1, L_skip_barrier); // non-static
__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ load_method_holder(t1, xmethod);
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
__ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
__ ld(x12, at_tos_p1()); // index
__ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
__ ld(x10, iaddress(x12, x10, _masm));
__ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
__ ld(x10, iaddress(x12, t0, _masm));
__ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {
@ -2192,7 +2191,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@ -2243,8 +2243,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);

View File

@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
(unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector,
unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
}
#ifdef __riscv_ztso
// Hotspot is compiled with TSO support, it will only run on hardware which
// supports Ztso
@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
}
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector,
unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
}
// NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
// as there are extra checks inside it which could disable UseRVV
// in some situations.

View File

@ -83,8 +83,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@ -105,7 +104,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
__ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@ -179,13 +178,12 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
__ load_absolute_address(tmp, (address)ct->byte_map_base());
__ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
{ // Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
__ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
__ z_bfalse(L_skip_barrier); // non-static
Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
Register klass = Z_R11;
__ load_method_holder(klass, Z_method);
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
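
The hunk drops the supports_fast_class_init_checks() test from the condition and asserts it instead, leaving the barrier logic itself unchanged. A simplified, self-contained model of that C2I class-initialization barrier; thread identity and the wrong-method stub are reduced to plain values, and the names only loosely follow the diff, not the real HotSpot API:

#include <iostream>

struct Klass { bool initialized; int init_thread; };

static bool clinit_barrier_fast_path(const Klass& k, int current_thread) {
  // fast path: fully initialized, or being initialized by this very thread
  return k.initialized || k.init_thread == current_thread;
}

static void c2i_entry(const Klass& holder, bool is_static, int current_thread) {
  if (!is_static) { std::cout << "skip barrier (non-static)\n"; return; }
  if (clinit_barrier_fast_path(holder, current_thread)) {
    std::cout << "proceed to compiled code\n";
  } else {
    std::cout << "jump to handle_wrong_method stub\n";   // slow path re-resolves the call
  }
}

int main() { c2i_entry({false, 7}, true, 7); }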

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
* Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
// When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
if (sizeKnown) {
__ movl(temp2, 31 + size);
__ movl(temp2, (isU ? 30 : 31) + size);
} else {
__ movl(temp2, 31);
__ movl(temp2, isU ? 30 : 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);
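
The diff's own formulas make the new constant easy to check: for a haystack of length n = 64 and a candidate index k = 40, the Latin-1 shift is k + 31 - n = 40 + 31 - 64 = 7, while the UTF-16 (isU) window starts one byte earlier at k - 2, giving k + 30 - n = 6. That one-byte difference is exactly the isU ? 30 : 31 selection added above (the numbers here are only illustrative).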

View File

@ -95,11 +95,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BarrierSet *bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
intptr_t disp = (intptr_t) ct->byte_map_base();
SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_loop, L_done;
const Register end = count;
@ -115,7 +111,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, disp);
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@ -128,10 +124,7 @@ __ BIND(L_done);
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
__ shrptr(obj, CardTable::card_shift());
@ -142,7 +135,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {

View File

@ -174,24 +174,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -533,18 +523,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@ -575,41 +565,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// We do it regardless of precise because we need the registers
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
__ lea(tmp1, dst);
}
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (needs_pre_barrier) {
shenandoah_write_barrier_pre(masm /*masm*/,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
}
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
if (val != noreg) {
if (ShenandoahCardBarrier) {
store_check(masm, tmp1);
}
// Flatten object address right away for simplicity: likely needed by barriers
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
__ lea(tmp1, dst);
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp1);
}
}
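
A condensed, self-contained restatement of the new store_at ordering may help; the decorator checks and register shuffling of the real assembler are reduced to booleans and prints, and need_satb_barrier / need_card_barrier are referenced by name only as in the diff:

#include <iostream>

struct StoreCtx {
  bool is_reference;     // 1: primitives need no barriers
  bool needs_satb;       // ShenandoahBarrierSet::need_satb_barrier(decorators, type)
  bool needs_card;       // ShenandoahBarrierSet::need_card_barrier(decorators, type)
  bool storing_non_null; // val != noreg
};

static void store_at_sketch(const StoreCtx& c) {
  if (!c.is_reference) { std::cout << "raw store\n"; return; }
  std::cout << "flatten address\n";                    // barriers want a plain base register
  if (c.needs_satb) {
    std::cout << "satb pre-barrier (log previous value)\n";   // 2: pre-barrier
  }
  std::cout << "store\n";
  if (c.needs_card && c.storing_non_null) {
    std::cout << "card post-barrier (store address)\n";       // 3: post-barrier
  }
}

int main() { store_at_sketch({true, true, true, true}); }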

View File

@ -41,21 +41,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -6086,7 +6086,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
subptr(count, 16 << shift);
jccb(Assembler::less, L_check_fill_32_bytes);
jcc(Assembler::less, L_check_fill_32_bytes);
align(16);
BIND(L_fill_64_bytes_loop_avx3);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_
const char * Register::RegisterImpl::name() const {
static const char *const names[number_of_registers] = {
#ifdef _LP64
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
#else
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
#endif // _LP64
};
return is_valid() ? names[encoding()] : "noreg";
}
@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
const char* XMMRegister::XMMRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
#ifdef _LP64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
#endif // _LP64
};
return is_valid() ? names[encoding()] : "xnoreg";
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
// The implementation of integer registers for the x86/x64 architectures.
// The implementation of integer registers for the x64 architectures.
class Register {
private:
int _encoding;
@ -44,11 +44,9 @@ private:
public:
inline friend constexpr Register as_Register(int encoding);
enum {
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
};
static const int number_of_registers = 32;
static const int number_of_byte_registers = 32;
static const int max_slots_per_register = 2;
class RegisterImpl: public AbstractRegisterImpl {
friend class Register;
@ -79,11 +77,9 @@ public:
// Actually available GP registers for use, depending on actual CPU capabilities and flags.
static int available_gp_registers() {
#ifdef _LP64
if (!UseAPX) {
return number_of_registers / 2;
}
#endif // _LP64
return number_of_registers;
}
};
@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
constexpr Register rbp = as_Register(5);
constexpr Register rsi = as_Register(6);
constexpr Register rdi = as_Register(7);
#ifdef _LP64
constexpr Register r8 = as_Register( 8);
constexpr Register r9 = as_Register( 9);
constexpr Register r8 = as_Register(8);
constexpr Register r9 = as_Register(9);
constexpr Register r10 = as_Register(10);
constexpr Register r11 = as_Register(11);
constexpr Register r12 = as_Register(12);
@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
constexpr Register r29 = as_Register(29);
constexpr Register r30 = as_Register(30);
constexpr Register r31 = as_Register(31);
#endif // _LP64
// The implementation of x87 floating point registers for the ia32 architecture.
@ -154,10 +148,8 @@ private:
public:
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
enum {
number_of_registers = 8,
max_slots_per_register = 2
};
static const int number_of_registers = 8;
static const int max_slots_per_register = 2;
class FloatRegisterImpl: public AbstractRegisterImpl {
friend class FloatRegister;
@ -217,10 +209,8 @@ private:
public:
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
enum {
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
};
static const int number_of_registers = 32;
static const int max_slots_per_register = 16; // 512-bit
class XMMRegisterImpl: public AbstractRegisterImpl {
friend class XMMRegister;
@ -250,11 +240,9 @@ public:
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {
#ifdef _LP64
if (UseAVX < 3) {
return number_of_registers / 2;
}
#endif // _LP64
return number_of_registers;
}
};
@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
#ifdef _LP64
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
constexpr XMMRegister xmm10 = as_XMMRegister(10);
@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
constexpr XMMRegister xmm29 = as_XMMRegister(29);
constexpr XMMRegister xmm30 = as_XMMRegister(30);
constexpr XMMRegister xmm31 = as_XMMRegister(31);
#endif // _LP64
// The implementation of AVX-512 opmask registers.
@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
max_gpr = Register::number_of_registers * Register::max_slots_per_register,
max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
// x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
// REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
// with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
// added for 32 bit jvm.
number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
1 // eflags
};
static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
1; // eflags
};
template <>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
Register method = rbx;
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register method = rbx;
{ // Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static
}
// Bypass the barrier for non-static methods
Register flags = rscratch1;
__ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
__ testl(flags, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L_skip_barrier); // non-static
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
}
__ bind(L_skip_barrier);
entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
if (method->needs_clinit_barrier()) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
// 0 - 63
0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
45, 46, 46, 47
};
static address kyberAvx512_12To16DupAddr() {
return (address) kyberAvx512_12To16Dup;
}
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
// 0 - 31
0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
4, 0, 4, 0, 4, 0, 4
};
static address kyberAvx512_12To16ShiftAddr() {
return (address) kyberAvx512_12To16Shift;
}
ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
// 0 - 7
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
};
static address kyberAvx512_12To16AndAddr() {
return (address) kyberAvx512_12To16And;
}
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
Label Loop;
Label Loop, VBMILoop;
__ addptr(condensed, condensedOffs);
if (VM_Version::supports_avx512_vbmi()) {
// mask load for the first 48 bytes of each vector
__ mov64(rax, 0x0000FFFFFFFFFFFF);
__ kmovql(k1, rax);
__ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
__ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
__ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
__ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
__ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
__ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
__ align(OptoLoopAlignment);
__ BIND(VBMILoop);
__ evmovdqub(xmm0, k1, Address(condensed, 0), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm1, k1, Address(condensed, 48), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm2, k1, Address(condensed, 96), false,
Assembler::AVX_512bit);
__ evmovdqub(xmm3, k1, Address(condensed, 144), false,
Assembler::AVX_512bit);
__ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
__ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
__ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
__ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
__ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
__ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
__ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
__ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
__ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
__ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
store4regs(parsed, 0, xmm0_3, _masm);
__ addptr(condensed, 192);
__ addptr(parsed, 256);
__ subl(parsedLength, 128);
__ jcc(Assembler::greater, VBMILoop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov64(rax, 0); // return 0
__ ret(0);
return start;
}
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);
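
The new VBMI path expands packed 12-bit Kyber coefficients (two per three condensed bytes) into 16-bit lanes by duplicating bytes with a permute, shifting each lane by 0 or 4, and masking with 0x0FFF, mirroring the kyberAvx512_12To16Dup/Shift/And tables above. A scalar reference for the same transform (function and buffer names are illustrative):

#include <cstdint>
#include <cstdio>

static void kyber12to16_scalar(const uint8_t* condensed, uint16_t* parsed, int pairs) {
  for (int i = 0; i < pairs; i++) {
    uint8_t b0 = condensed[3 * i + 0];
    uint8_t b1 = condensed[3 * i + 1];
    uint8_t b2 = condensed[3 * i + 2];
    uint16_t lo = (uint16_t)(b0 | (b1 << 8));   // lane built from byte pair (0,1): shift 0
    uint16_t hi = (uint16_t)(b1 | (b2 << 8));   // lane built from byte pair (1,2): shift 4
    parsed[2 * i + 0] = lo & 0x0FFF;
    parsed[2 * i + 1] = (hi >> 4) & 0x0FFF;
  }
}

int main() {
  const uint8_t condensed[3] = { 0xAB, 0xCD, 0xEF };
  uint16_t parsed[2];
  kyber12to16_scalar(condensed, parsed, 1);
  printf("%03X %03X\n", parsed[0], parsed[1]);   // prints DAB EFC
}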

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
if (bytecode() == Bytecodes::_invokestatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;
@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
if (VM_Version::supports_fast_class_init_checks() &&
(bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
//
// Query CPUID 0xD.19 for APX XSAVE offset
// Extended State Enumeration Sub-leaf 19 (APX)
// EAX = size of APX state (should be 128)
// EBX = offset in standard XSAVE format
//
__ movl(rax, 0xD);
__ movl(rcx, 19);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
__ movl(Address(rsi, 0), rax);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
__ movl(Address(rsi, 0), rbx);
UseAPX = save_apx;
__ bind(vector_save_restore);
//
@ -921,8 +935,9 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
(_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
_model == 0xCC || _model == 0xDD)) {
(supports_hybrid() ||
_model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
_model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}
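
The stub change stores CPUID leaf 0xD, sub-leaf 19 (EAX = APX extended-state size, EBX = its offset in the standard XSAVE layout) into CpuidInfo. A hedged user-space equivalent using GCC/Clang's <cpuid.h> helper, shown only to illustrate which register carries which value:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (__get_cpuid_count(0xD, 19, &eax, &ebx, &ecx, &edx)) {
    printf("APX xstate size:   %u bytes\n", eax);   // expected 128 per the diff's comment
    printf("APX xstate offset: %u\n", ebx);         // offset within the XSAVE area
  }
  return 0;
}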

View File

@ -676,6 +676,10 @@ protected:
// Space to save apx registers after signal handle
jlong apx_save[2]; // Save r16 and r31
// cpuid function 0xD, subleaf 19 (APX extended state)
uint32_t apx_xstate_size; // EAX: size of APX state (128)
uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
VM_Features feature_flags() const;
// Asserts
@ -739,6 +743,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check ymm register after signal handle
static int ymm_test_value() { return 0xCAFEBABE; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
regName[i++] = reg->name();
#ifdef AMD64
regName[i++] = reg->name();
#endif // AMD64
reg = reg->successor();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,14 +52,8 @@ inline bool is_KRegister() {
}
inline Register as_Register() {
assert( is_Register(), "must be");
// Yuk
#ifdef AMD64
assert(is_Register(), "must be");
return ::as_Register(value() >> 1);
#else
return ::as_Register(value());
#endif // AMD64
}
inline FloatRegister as_FloatRegister() {
@ -82,9 +76,6 @@ inline KRegister as_KRegister() {
inline bool is_concrete() {
assert(is_reg(), "must be");
#ifndef AMD64
if (is_Register()) return true;
#endif // AMD64
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
int base = value() - ConcreteRegisterImpl::max_fpr;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define CPU_X86_VMREG_X86_INLINE_HPP
inline VMReg Register::RegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
return VMRegImpl::as_VMReg(encoding() << 1);
}
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
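
With the 32-bit branch gone, the GPR-to-VMReg mapping is unconditionally "two 32-bit slots per 64-bit register", which is why as_VMReg() above shifts left and vmreg_x86.inline.hpp's as_Register() shifts right. A tiny worked sketch (helper names are illustrative):

constexpr int gpr_to_vmreg_slot(int encoding) { return encoding << 1; }  // rdi (7) -> slot 14
constexpr int vmreg_slot_to_gpr(int slot)     { return slot >> 1; }      // slots 14/15 -> rdi (7)
static_assert(vmreg_slot_to_gpr(gpr_to_vmreg_slot(7)) == 7, "round trip");
static_assert(vmreg_slot_to_gpr(15) == 7, "second slot of rdi maps back too");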

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -38,7 +38,7 @@ class AIXDecoder: public AbstractDecoder {
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return Aix::physical_memory();
}
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@ -683,7 +703,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, kernel thread id: %zu).",
os::current_thread_id(), (uintx) kernel_thread_id);
return 0;
return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@ -2264,6 +2284,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;
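
The repeated pattern in this file is a new os::Machine:: layer (the host view) sitting between the generic os:: entry points and the AIX-specific probes. A stripped-down, self-contained sketch of that layering; platform_total_swap() stands in for the libperfstat query and is hypothetical:

#include <cstdint>

using physical_memory_size_type = uint64_t;

namespace Machine { bool total_swap_space(physical_memory_size_type& value); }

// hypothetical platform probe (libperfstat on AIX would fill in the real value)
static bool platform_total_swap(physical_memory_size_type& value) { value = 0; return true; }

bool Machine::total_swap_space(physical_memory_size_type& value) {
  return platform_total_swap(value);            // host-level query wraps the platform probe
}

bool total_swap_space(physical_memory_size_type& value) {
  return Machine::total_swap_space(value);      // generic os:: entry forwards to the host view
}

int main() {
  physical_memory_size_type v = 0;
  return total_swap_space(v) ? 0 : 1;
}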

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ class fixed_strings {
public:
fixed_strings() : first(0) {}
fixed_strings() : first(nullptr) {}
~fixed_strings() {
node* n = first;
while (n) {
@ -113,7 +113,7 @@ bool AixSymbols::get_function_name (
// information (null if not available)
bool demangle // [in] whether to demangle the name
) {
struct tbtable* tb = 0;
struct tbtable* tb = nullptr;
unsigned int searchcount = 0;
// initialize output parameters
@ -653,10 +653,10 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
codeptr_t cur_iar = 0;
stackptr_t cur_sp = 0;
codeptr_t cur_rtoc = 0;
codeptr_t cur_lr = 0;
codeptr_t cur_iar = nullptr;
stackptr_t cur_sp = nullptr;
codeptr_t cur_rtoc = nullptr;
codeptr_t cur_lr = nullptr;
const ucontext_t* uc = (const ucontext_t*) context;
@ -926,7 +926,7 @@ static struct handletableentry* p_handletable = nullptr;
static const char* rtv_linkedin_libpath() {
constexpr int bufsize = 4096;
static char buffer[bufsize];
static const char* libpath = 0;
static const char* libpath = nullptr;
// we only try to retrieve the libpath once. After that try we
// let libpath point to buffer, which then contains a valid libpath

View File

@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
return 0;
return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
// Available here means free. Note that this number is of no much use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of unused memory for caches, and return it willingly in case of needs.
@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return Bsd::physical_memory();
}
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
return 0;
return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@ -1400,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}
@ -2189,6 +2209,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
return _processor_count;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
bool CgroupSubsystem::active_processor_count(int& value) {
int cpu_count;
int result = -1;
bool CgroupSubsystem::active_processor_count(double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
CachedMetric* cpu_limit = contrl->metrics_cache();
CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
CachedMetric<double>* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
value = (int)cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
value = cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
return true;
}
cpu_count = os::Linux::active_processor_count();
int cpu_count = os::Linux::active_processor_count();
double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
CachedMetric* memory_limit = contrl->metrics_cache();
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,20 +181,21 @@ class CgroupController: public CHeapObj<mtInternal> {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
template <typename MetricType>
class CachedMetric : public CHeapObj<mtInternal>{
private:
volatile physical_memory_size_type _metric;
volatile MetricType _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
_metric = value_unlimited;
_metric = static_cast<MetricType>(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
physical_memory_size_type value() { return _metric; }
void set_value(physical_memory_size_type value, jlong timeout) {
MetricType value() { return _metric; }
void set_value(MetricType value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj<mtInternal>{
}
};
template <class T>
template <class T, typename MetricType>
class CachingCgroupController : public CHeapObj<mtInternal> {
private:
T* _controller;
CachedMetric* _metrics_cache;
CachedMetric<MetricType>* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
_metrics_cache = new CachedMetric();
_metrics_cache = new CachedMetric<MetricType>();
}
CachedMetric* metrics_cache() { return _metrics_cache; }
CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
bool active_processor_count(int& value);
bool active_processor_count(double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);
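
Parameterizing CachedMetric and CachingCgroupController on the metric type is what lets the CPU controller cache a fractional value while the memory controller keeps caching byte sizes. A toy, self-contained version of that cache; timeout handling is stubbed, where the real code keys it off os::elapsed_counter():

#include <cstdint>

template <typename MetricType>
class CachedMetricSketch {
  MetricType _metric{};
  int64_t    _next_check = INT64_MIN;
 public:
  bool should_check_metric(int64_t now) const { return now > _next_check; }
  MetricType value() const { return _metric; }
  void set_value(MetricType v, int64_t now, int64_t timeout) {
    _metric = v;
    _next_check = now + timeout;    // short grace period before re-reading cgroup files
  }
};

// usage mirroring the diff: fractional CPUs vs. byte-sized memory limits
CachedMetricSketch<double>   cpu_limit_cache;
CachedMetricSketch<uint64_t> memory_limit_cache;

int main() {
  cpu_limit_cache.set_value(1.5, /*now=*/100, /*timeout=*/20);
  return cpu_limit_cache.should_check_metric(/*now=*/110) ? 1 : 0;   // still cached
}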

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
int result = upper_bound;
double result = upper_bound;
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
if (quota > 0 && period > 0) { // Use quotas
double cpu_quota = static_cast<double>(quota) / period;
log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
result = MIN2(result, cpu_quota);
}
// Use quotas
if (quota_count != 0) {
limit_count = quota_count;
}
result = MIN2(upper_bound, limit_count);
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
value = result;
return true;
}
@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
int cpu_limit_val = -1;
double cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
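
A concrete example of what changes: with a quota of 150000 µs against a period of 100000 µs on an 8-CPU host, the removed integer path computed ceilf(150000/100000) = 2 and reported MIN2(8, 2) = 2 processors, while the new code reports MIN2(8.0, 1.5) = 1.5, so callers now see the fractional limit directly (numbers chosen for illustration).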

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +32,7 @@
class CgroupUtil: AllStatic {
public:
static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
static int get_updated_cpu_limit(CgroupCpuController* c,
int lowest,
int upper_bound);
static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
}
bool CgroupV1Subsystem::is_containerized() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_cpuacct = cpuacct;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};

View File

@ -66,9 +66,6 @@
#endif
// open(2) flags
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
int host_cpus = os::Linux::active_processor_count();
int cpus = host_cpus;
double host_cpus = os::Linux::active_processor_count();
double cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value) {
bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
mem_swap_buf, mem_limit_buf, host_free_swap);
" container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
}
return false;
}
@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
bool OSContainer::active_processor_count(int& value) {
bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@ -291,11 +289,13 @@ template<typename T> struct metric_fmt;
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
template void OSContainer::print_container_metric<double>(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
template <typename T>
@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
if (unit[0] != '\0') {
st->print_cr(" %s", unit);
} else {
st->print_cr("");
}
const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
const char* unit_prefix = unit[0] != '\0' ? " " : "";
char line[128] = {};
os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
st->print_cr("%s", line);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
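
The print_container_metric rework above keeps the metric_fmt trait dispatch but builds the whole line in one buffer instead of branching on whether a unit follows. A self-contained sketch of that shape; the pad width and buffer sizes are illustrative:

#include <cstdio>
#include <cstring>

template <typename T> struct metric_fmt;
template <> struct metric_fmt<double>      { static constexpr const char* fmt = "%.2f"; };
template <> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };

template <typename T>
void print_metric(const char* name, T value, const char* unit) {
  char value_str[32] = {};
  snprintf(value_str, sizeof(value_str), metric_fmt<T>::fmt, value);
  const int pad = 36 - (int)strlen(name) - 2;              // -2 for the ": "
  const char* sep = unit[0] != '\0' ? " " : "";
  char line[128] = {};
  snprintf(line, sizeof(line), "%s: %*s%s%s", name, pad, value_str, sep, unit);
  puts(line);                                              // one write, no trailing-space branch
}

int main() {
  print_metric("cpu_quota", 1.50, "");
  print_metric("memory_limit", "unlimited", "");
}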

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,8 +72,7 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@ -84,7 +83,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
static bool active_processor_count(int& value);
static bool active_processor_count(double& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();

View File

@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
bool os::is_containerized() {
return OSContainer::is_containerized();
}
bool os::Container::memory_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}
bool os::Container::used_memory(physical_memory_size_type& value) {
return OSContainer::memory_usage_in_bytes(value);
}
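These os::Container wrappers translate an "unlimited" cgroup value into a false return so callers fall back to host figures. A hedged sketch of that filtering, with assumed names (raw_memory_limit, container_memory_limit, UNLIMITED):

#include <cstdint>
#include <cstdio>

using mem_size_t = uint64_t;
static const mem_size_t UNLIMITED = UINT64_MAX;

// Hypothetical raw cgroup query; stands in for the OSContainer call.
static bool raw_memory_limit(mem_size_t& v) { v = UNLIMITED; return true; }

static bool container_memory_limit(mem_size_t& value) {
    mem_size_t result = 0;
    if (raw_memory_limit(result) && result != UNLIMITED) {
        value = result;   // publish only a real, finite limit
        return true;
    }
    return false;         // unlimited or unavailable: caller falls back to the host value
}

int main() {
    mem_size_t limit = 0;
    if (container_memory_limit(limit)) {
        printf("container memory limit: %llu bytes\n", (unsigned long long)limit);
    } else {
        printf("no finite container limit, use host memory\n");
    }
    return 0;
}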
bool os::available_memory(physical_memory_size_type& value) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
return Machine::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
bool os::Container::available_memory(physical_memory_size_type& value) {
return OSContainer::available_memory_in_bytes(value);
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
return Machine::free_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
if (OSContainer::is_containerized()) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
} // fallback to the host swap space if the container returned unlimited
if (is_containerized() && Container::total_swap_space(value)) {
return true;
} // fallback to the host swap space if the container value fails
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
bool os::Container::total_swap_space(physical_memory_size_type& value) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
return false;
}
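Container swap is derived as the difference between the memory+swap limit and the memory limit, and is only reported when both limits are finite and consistent. An illustrative standalone sketch of that computation (the names compute_container_swap and UNLIMITED are assumptions):

#include <cstdint>
#include <cstdio>

using mem_size_t = uint64_t;
static const mem_size_t UNLIMITED = UINT64_MAX;

// Returns true and sets 'swap' only when both cgroup limits are finite and
// consistent (memory+swap >= memory); otherwise the caller falls back to the host.
static bool compute_container_swap(mem_size_t mem_swap_limit, mem_size_t mem_limit, mem_size_t& swap) {
    if (mem_limit != UNLIMITED && mem_swap_limit != UNLIMITED && mem_swap_limit >= mem_limit) {
        swap = mem_swap_limit - mem_limit;
        return true;
    }
    return false;
}

int main() {
    mem_size_t swap = 0;
    // e.g. memory limit 1 GiB, memory+swap limit 1.5 GiB => 0.5 GiB of swap
    if (compute_container_swap(1536u * 1024 * 1024, 1024u * 1024 * 1024, swap)) {
        printf("container swap: %llu bytes\n", (unsigned long long)swap);
    }
    return 0;
}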
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
if (is_containerized()) {
if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
value = host_free_swap_val;
return true;
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
return host_free_swap_f(value);
}
bool os::Container::free_swap_space(physical_memory_size_type& value) {
return OSContainer::available_swap_in_bytes(value);
}
physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
physical_memory_size_type phys_mem = Linux::physical_memory();
physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
physical_memory_size_type os::Machine::physical_memory() {
return Linux::physical_memory();
}
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
int i = -1;
bool supported = OSContainer::active_processor_count(i);
double cpus = -1;
bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
assert(i > 0, "must be");
assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
OSContainer::print_container_metric(st, "active_processor_count", i);
OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@ -4737,15 +4807,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
int active_cpus = -1;
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {
active_cpus = os::Linux::active_processor_count();
if (is_containerized()) {
double cpu_quota;
if (Container::processor_count(cpu_quota)) {
int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
assert(active_cpus <= Machine::active_processor_count(), "must be");
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
return active_cpus;
}
}
return active_cpus;
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
return os::Linux::active_processor_count();
}
bool os::Container::processor_count(double& value) {
return OSContainer::active_processor_count(value);
}
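The container path now works with a fractional CPU quota and rounds it up before comparing against the host count. A simplified sketch of that rounding, assuming a quota already expressed as CPUs (for example 150000/100000 from a cgroup v2 cpu.max file); HotSpot itself goes through the cgroup subsystem rather than a helper like this:

#include <cmath>
#include <cstdio>

static int processors_from_quota(double quota_cpus, int host_cpus) {
    if (quota_cpus <= 0.0) {
        return host_cpus;                 // no container limit: use the host count
    }
    int cpus = (int)ceil(quota_cpus);     // round fractional quotas (e.g. 1.5) up to 2
    return cpus < host_cpus ? cpus : host_cpus;
}

int main() {
    // cpu.max "150000 100000" => quota/period = 1.5 CPUs => 2 active processors
    printf("%d\n", processors_from_quota(150000.0 / 100000.0, 8));
    // unlimited quota => fall back to the host's 8
    printf("%d\n", processors_from_quota(-1.0, 8));
    return 0;
}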
static bool should_warn_invalid_processor_id() {
@ -4878,36 +4959,18 @@ int os::open(const char *path, int oflag, int mode) {
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
// might fork and exec without closing all appropriate file descriptors,
// and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or
//
// - might cause an fopen in the subprocess to fail on a system
// suffering from bug 1085341.
//
// (Yes, the default setting of the close-on-exec flag is a Unix
// design flaw)
//
// See:
// 1085341: 32-bit stdio routines should support file descriptors >255
// 4843136: (process) pipe file descriptor from Runtime.exec not being closed
// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
//
// Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
// O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
// because it saves a system call and removes a small window where the flag
// is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
// and we fall back to using FD_CLOEXEC (see below).
#ifdef O_CLOEXEC
// might fork and exec without closing all appropriate file descriptors.
oflag |= O_CLOEXEC;
#endif
int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
// No further checking is needed if open() returned an error or
// access mode is not read only.
if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
return fd;
}
//If the open succeeded, the file might still be a directory
// If the open succeeded and is read only, the file might be a directory
// which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
@ -4925,21 +4988,6 @@ int os::open(const char *path, int oflag, int mode) {
}
}
#ifdef FD_CLOEXEC
// Validate that the use of the O_CLOEXEC flag on open above worked.
// With recent kernels, we will perform this check exactly once.
static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
if (!O_CLOEXEC_is_known_to_work) {
int flags = ::fcntl(fd, F_GETFD);
if (flags != -1) {
if ((flags & FD_CLOEXEC) != 0)
O_CLOEXEC_is_known_to_work = 1;
else
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
}
#endif
return fd;
}
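The shortened os::open keeps two behaviours: request close-on-exec atomically via O_CLOEXEC and reject directories opened read-only. A hedged standalone sketch of the same checks, using a hypothetical open_checked wrapper and an example path:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cerrno>

static int open_checked(const char* path, int oflag, int mode) {
#ifdef O_CLOEXEC
    oflag |= O_CLOEXEC;   // set close-on-exec atomically at open() time
#endif
    int fd = ::open(path, oflag, mode);
    // Only read-only opens need the directory check below.
    if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
        return fd;
    }
    struct stat buf;
    if (::fstat(fd, &buf) == 0 && S_ISDIR(buf.st_mode)) {
        ::close(fd);      // opened a directory read-only: report it as an error
        errno = EISDIR;
        return -1;
    }
    return fd;
}

int main() {
    int fd = open_checked("/etc/hostname", O_RDONLY, 0);   // example path
    if (fd >= 0) ::close(fd);
    return 0;
}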

View File

@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
} else {
if (!successful_write) {
remove(destfile);
}
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
remove(filename);
break;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -951,6 +951,32 @@ struct enum_sigcode_desc_t {
const char* s_desc;
};
#if defined(LINUX)
// Additional kernel si_code definitions that are only exported by
// more recent glibc distributions, so we have to hard-code the values.
#ifndef BUS_MCEERR_AR // glibc 2.17
#define BUS_MCEERR_AR 4
#define BUS_MCEERR_AO 5
#endif
#ifndef SEGV_PKUERR // glibc 2.27
#define SEGV_PKUERR 4
#endif
#ifndef SYS_SECCOMP // glibc 2.28
#define SYS_SECCOMP 1
#endif
#ifndef TRAP_BRANCH // glibc 2.30
#define TRAP_BRANCH 3
#endif
#ifndef TRAP_HWBKPT // not glibc version specific - gdb related
#define TRAP_HWBKPT 4
#endif
#endif // LINUX
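The block above hard-codes si_code values that older glibc headers do not export and then uses them in the description table. A minimal sketch of that fallback pattern with a two-entry table and a hypothetical describe_si_code helper:

#include <csignal>
#include <cstdio>

#ifndef SEGV_PKUERR        // exported by glibc 2.27 and later
#define SEGV_PKUERR 4
#endif
#ifndef BUS_MCEERR_AR      // exported by glibc 2.17 and later
#define BUS_MCEERR_AR 4
#endif

struct sig_code_desc { int sig; int code; const char* desc; };

static const char* describe_si_code(int sig, int code) {
    static const sig_code_desc table[] = {
        { SIGSEGV, SEGV_PKUERR,   "Protection key checking failure." },
        { SIGBUS,  BUS_MCEERR_AR, "Hardware memory error consumed on a machine check." },
    };
    for (const sig_code_desc& e : table) {
        if (e.sig == sig && e.code == code) return e.desc;
    }
    return "unknown si_code";
}

int main() {
    printf("%s\n", describe_si_code(SIGSEGV, SEGV_PKUERR));
    return 0;
}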
static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
const struct {
@ -976,6 +1002,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
#if defined(LINUX)
{ SIGSEGV, SEGV_BNDERR, "SEGV_BNDERR", "Failed address bound checks." },
{ SIGSEGV, SEGV_PKUERR, "SEGV_PKUERR", "Protection key checking failure." },
#endif
#if defined(AIX)
// no explanation found what keyerr would be
@ -984,8 +1011,18 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
{ SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
{ SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
#if defined(LINUX)
{ SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","Hardware memory error consumed on a machine check: action required." },
{ SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","Hardware memory error detected in process but not consumed: action optional." },
{ SIGSYS, SYS_SECCOMP, "SYS_SECCOMP", "Secure computing (seccomp) filter failure." },
#endif
{ SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
{ SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
#if defined(LINUX)
{ SIGTRAP, TRAP_BRANCH, "TRAP_BRANCH", "Process taken branch trap." },
{ SIGTRAP, TRAP_HWBKPT, "TRAP_HWBKPT", "Hardware breakpoint/watchpoint." },
#endif
{ SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
{ SIGCHLD, CLD_KILLED, "CLD_KILLED", "Child has terminated abnormally and did not create a core file." },
{ SIGCHLD, CLD_DUMPED, "CLD_DUMPED", "Child has terminated abnormally and created a core file." },
@ -993,6 +1030,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGCHLD, CLD_STOPPED, "CLD_STOPPED", "Child has stopped." },
{ SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
#ifdef SIGPOLL
{ SIGPOLL, POLL_IN, "POLL_IN", "Data input available." },
{ SIGPOLL, POLL_OUT, "POLL_OUT", "Output buffers available." },
{ SIGPOLL, POLL_MSG, "POLL_MSG", "Input message available." },
{ SIGPOLL, POLL_ERR, "POLL_ERR", "I/O error." },

View File

@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::Machine::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::Machine::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
bool os::total_swap_space(physical_memory_size_type& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
physical_memory_size_type os::Machine::physical_memory() {
return win32::physical_memory();
}
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
return Machine::active_processor_count();
}
int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}
inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP

View File

@ -42,8 +42,19 @@ frame JavaThread::pd_last_frame() {
void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
_card_table_base = nullptr;
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
_card_table_base = nullptr;
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
_card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
_card_table_base = (address)ctbs->card_table_base_const();
} else {
_card_table_base = nullptr;
}

View File

@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "runtime/vm_version.hpp"
// put OS-includes here
# include <sys/types.h>
@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
#define XSAVE_HDR_OFFSET 512
#define XFEATURE_APX (1ULL << 19)
// XSAVE header structure
// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
// Also: Linux kernel arch/x86/include/asm/fpu/types.h
struct xstate_header {
uint64_t xfeatures;
uint64_t xcomp_bv;
uint64_t reserved[6];
};
// APX extended state - R16-R31 (16 x 64-bit registers)
// See: Intel APX Architecture Specification
struct apx_state {
uint64_t regs[16]; // r16-r31
};
static apx_state* get_apx_state(const ucontext_t* uc) {
uint32_t offset = VM_Version::apx_xstate_offset();
if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
return nullptr;
}
char* xsave = (char*)uc->uc_mcontext.fpregs;
xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
// Check if APX state is present in this context
if (!(hdr->xfeatures & XFEATURE_APX)) {
return nullptr;
}
return (apx_state*)(xsave + offset);
}
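get_apx_state relies on VM_Version::apx_xstate_offset() to locate the APX component inside the XSAVE area. One plausible way to obtain such an offset is CPUID leaf 0xD, sub-leaf 19, which reports the component's size in EAX and its standard-layout offset in EBX; the probe below is an illustrative sketch (GCC/Clang <cpuid.h>), not the VM_Version implementation:

#include <cpuid.h>
#include <cstdio>

int main() {
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
    const unsigned int APX_COMPONENT = 19;   // XCR0 bit 19, matching XFEATURE_APX above
    if (__get_cpuid_count(0xD, APX_COMPONENT, &eax, &ebx, &ecx, &edx) && eax != 0) {
        printf("APX xstate: size=%u bytes, offset=%u bytes\n", eax, ebx);
    } else {
        printf("APX xstate component not reported by this CPU\n");
    }
    return 0;
}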
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
// Dump APX EGPRs (R16-R31)
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
if (apx != nullptr) {
for (int i = 0; i < 16; i++) {
st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
if (i % 4 == 3) st->cr();
}
}
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
const int register_count = 16;
if (context == nullptr) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
if (context == nullptr || n == register_count) {
if (n == register_count) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
if (n < 16) {
// Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
# undef CASE_PRINT_REG
} else {
// APX extended general purpose registers (R16-R31)
st->print("R%d=", n);
print_location(st, apx->regs[n - 16]);
}
++n;
}
}
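print_register_info advances the continuation index before each potentially faulting print, so a crash while printing one register location lets the error handler resume with the next. A tiny sketch of that resumable-iteration pattern with illustrative names:

#include <cstdio>

static void print_items(const char* const* items, int count, int& continuation) {
    int n = continuation;
    while (n < count) {
        continuation = n + 1;          // record progress before the risky print
        printf("%s\n", items[n]);      // if this faults, a retry skips item n
        ++n;
    }
}

int main() {
    const char* regs[] = { "RAX", "RBX", "RCX" };
    int cont = 0;
    print_items(regs, 3, cont);        // prints all three; cont ends at 3
    return 0;
}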

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/growableArray.hpp"
void AOTGrowableArrayHelper::deallocate(void* mem) {
if (!AOTMetaspace::in_aot_cache(mem)) {
GrowableArrayCHeapAllocator::deallocate(mem);
}
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
#define SHARE_AOT_AOTGROWABLEARRAY_HPP
#include <memory/metaspaceClosureType.hpp>
#include <utilities/growableArray.hpp>
class AOTGrowableArrayHelper {
public:
static void deallocate(void* mem);
};
// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
// MetaspaceClosure. This type should be used for growable arrays that need to be
// stored in the AOT cache. See ModuleEntry::_reads for an example.
template <typename E>
class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
friend class VMStructs;
friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;
static E* allocate(int max, MemTag mem_tag) {
return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
}
E* allocate() {
return allocate(this->_capacity, mtClass);
}
void deallocate(E* mem) {
#if INCLUDE_CDS
AOTGrowableArrayHelper::deallocate(mem);
#else
GrowableArrayCHeapAllocator::deallocate(mem);
#endif
}
public:
AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
GrowableArrayWithAllocator<E, AOTGrowableArray>(
allocate(initial_capacity, mem_tag),
initial_capacity) {}
AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
static bool is_read_only_by_default() { return false; }
};
#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
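AOTGrowableArrayHelper::deallocate frees element arrays only when they are not part of the mapped AOT cache. A hedged sketch of that policy with an assumed region descriptor (CacheRegion, g_aot_region); the real check is AOTMetaspace::in_aot_cache:

#include <cstdlib>

struct CacheRegion {
    const char* base;
    size_t      size;
    bool contains(const void* p) const {
        const char* c = static_cast<const char*>(p);
        return base != nullptr && c >= base && c < base + size;
    }
};

static CacheRegion g_aot_region = { nullptr, 0 };   // filled in when the cache is mapped

static void deallocate_unless_cached(void* mem) {
    if (!g_aot_region.contains(mem)) {
        ::free(mem);   // ordinary C-heap backed element array
    }
    // else: the array data lives inside the mapped AOT cache and must not be freed
}

int main() {
    void* p = ::malloc(64);
    deallocate_unless_cached(p);   // outside the (unmapped) cache region, so it is freed
    return 0;
}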

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,17 +22,16 @@
*
*/
#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "cds/aotGrowableArray.hpp"
#include "gc/shared/gc_globals.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "memory/metaspaceClosure.hpp"
inline bool G1CMObjArrayProcessor::should_be_sliced(oop obj) {
return obj->is_objArray() && ((objArrayOop)obj)->size() >= 2 * ObjArrayMarkingStride;
template <typename E>
void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}
#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
info._type = ref->msotype();
info._type = ref->type();
_objs.append(info);
}
@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->msotype();
info._type = src_info->type();
objs.append(info);
}
@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
MetaspaceObj::Type type = info._type;
const char* type_name = MetaspaceObj::type_name(type);
MetaspaceClosureType type = info._type;
const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
case MetaspaceObj::ClassType:
case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolType:
case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstMethodType:
case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodType:
case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
case MetaspaceClosureType::ModuleEntryType:
log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::PackageEntryType:
log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::GrowableArrayType:
log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::KlassTrainingDataType:
case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodTrainingDataType:
case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::CompileTrainingDataType:
case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
mod->name_as_C_string());
}
void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
pkg->module()->name_as_C_string(), pkg->name_as_C_string());
}
void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
arr->length(), arr->capacity());
}
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
class GrowableArrayBase;
class KlassTrainingData;
class MethodCounters;
class MethodTrainingData;
class ModuleEntry;
class outputStream;
class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
MetaspaceObj::Type _type;
MetaspaceClosureType _type;
};
public:
@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,10 +360,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
if (!CDSConfig::is_using_archive()) {
assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@ -466,7 +464,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
StringTable::load_shared_strings_array();
if (CDSConfig::is_dumping_final_static_archive()) {
StringTable::move_shared_strings_into_runtime_table();
}
}
}
@ -619,7 +619,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;

View File

@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_roots(it);
}
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::remove_unshareable_info();
}
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@ -1097,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
assert(CDSConfig::allow_only_single_java_thread(), "Required");
if (!CDSConfig::is_dumping_preimage_static_archive()) {
// A single thread is required for Reference handling and deterministic CDS archive.
// It's not required for dumping preimage, where References won't be archived and
// determinism is not needed.
assert(CDSConfig::allow_only_single_java_thread(), "Required");
}
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@ -1135,6 +1147,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
@ -1154,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
if (HeapShared::is_writing_mapping_mode()) {
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
}
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
// We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
// is not running. Otherwise the checks for next/discovered may not work.
precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
// The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
// is not running.
//
// This code is called on every object found by AOTArtifactFinder. When dumping the
// preimage archive, AOTArtifactFinder should not find any Reference objects.
precond(!CDSConfig::is_dumping_preimage_static_archive());
precond(CDSConfig::allow_only_single_java_thread());
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);

Some files were not shown because too many files have changed in this diff.