Merge branch 'master' into dependsonlyontest

This commit is contained in:
Quan Anh Mai 2026-02-08 17:59:54 +07:00
commit ffbe66be14
1258 changed files with 43318 additions and 30426 deletions

View File

@ -1,7 +1,7 @@
#!/bin/bash -f
#
# Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,13 @@
# questions.
#
# Script to update the Copyright YEAR range in Mercurial & Git sources.
# Script to update the Copyright YEAR range in Git sources.
# (Originally from xdono, Thanks!)
# To update Copyright years for changes in a specific branch,
# you use a command along these lines:
# $ git diff upstream/master...<branch-name> | lsdiff | cut -d '/' -f 2- | bash bin/update_copyright_year.sh -m -
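# To update Copyright years for files listed in a file rather than on
# stdin (a sketch; the file name modified.txt is only illustrative):
# $ git diff --name-only upstream/master...<branch-name> > modified.txt
# $ bash bin/update_copyright_year.sh -m modified.txt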
#------------------------------------------------------------
copyright="Copyright"
copyright_symbol="(c)"
@ -47,7 +51,7 @@ rm -f -r ${tmp}
mkdir -p ${tmp}
total=0
usage="Usage: `basename "$0"` [-c company] [-y year] [-h|f]"
usage="Usage: `basename "$0"` [-c company] [-y year] [-m file] [-h|f]"
Help()
{
# Display Help
@ -65,15 +69,18 @@ Help()
echo "-b Specifies the base reference for change set lookup."
echo "-f Updates the copyright for all change sets in a given year,"
echo " as specified by -y. Overrides -b flag."
echo "-m Read the list of modified files from the given file,"
echo " use - to read from stdin"
echo "-h Print this help."
echo
}
full_year=false
base_reference=master
modified_files_origin="";
# Process options
while getopts "b:c:fhy:" option; do
while getopts "b:c:fhm:y:" option; do
case $option in
b) # supplied base reference
base_reference=${OPTARG}
@ -91,6 +98,9 @@ while getopts "b:c:fhy:" option; do
y) # supplied company year
year=${OPTARG}
;;
m) # modified files will be read from the given origin
modified_files_origin="${OPTARG}"
;;
\?) # illegal option
echo "$usage"
exit 1
@ -110,18 +120,10 @@ git status &> /dev/null && git_found=true
if [ "$git_found" != "true" ]; then
echo "Error: Please execute script from within a JDK git repository."
exit 1
else
echo "Using Git version control system"
vcs_status=(git ls-files -m)
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
fi
echo "Using Git version control system"
# Return true if it makes sense to edit this file
saneFileToCheck()
{
@ -168,6 +170,25 @@ updateFile() # file
echo "${changed}"
}
# Update the copyright year on the files listed on stdin
updateFiles() # stdin: list of files to update
{
count=0
fcount=0
while read i; do
fcount=`expr ${fcount} '+' 1`
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
}
# Update the copyright year on all files changed by this changeset
updateChangesetFiles() # changeset
{
@ -178,18 +199,7 @@ updateChangesetFiles() # changeset
| ${awk} -F' ' '{for(i=1;i<=NF;i++)print $i}' \
> ${files}
if [ -f "${files}" -a -s "${files}" ] ; then
fcount=`cat ${files}| wc -l`
for i in `cat ${files}` ; do
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
cat ${files} | updateFiles
else
printf " ERROR: No files changed in the changeset? Must be a mistake.\n"
set -x
@ -204,67 +214,80 @@ updateChangesetFiles() # changeset
}
# Check if repository is clean
vcs_status=(git ls-files -m)
previous=`"${vcs_status[@]}"|wc -l`
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contains previously edited working set files."
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
# Get all changesets this year
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
if [ "x$modified_files_origin" != "x" ]; then
cat $modified_files_origin | updateFiles
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
# Get all changesets this year
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
fi
# Cleanup

View File

@ -1385,10 +1385,9 @@ dpkg-deb -x /tmp/libasound2-dev_1.0.25-4_armhf.deb .</code></pre></li>
can specify it by <code>--with-alsa</code>.</p></li>
</ul>
<h4 id="x11-1">X11</h4>
<p>You will need X11 libraries suitable for your <em>target</em> system.
In most cases, using Debian's pre-built libraries works fine.</p>
<p>Note that X11 is needed even if you only want to build a headless
JDK.</p>
<p>When not building a headless JDK, you will need X11 libraries
suitable for your <em>target</em> system. In most cases, using Debian's
pre-built libraries works fine.</p>
<ul>
<li><p>Go to <a href="https://www.debian.org/distrib/packages">Debian
Package Search</a>, search for the following packages for your

View File

@ -1178,10 +1178,8 @@ Note that alsa is needed even if you only want to build a headless JDK.
#### X11
You will need X11 libraries suitable for your *target* system. In most cases,
using Debian's pre-built libraries works fine.
Note that X11 is needed even if you only want to build a headless JDK.
When not building a headless JDK, you will need X11 libraries suitable for your
*target* system. In most cases, using Debian's pre-built libraries works fine.
* Go to [Debian Package Search](https://www.debian.org/distrib/packages),
search for the following packages for your *target* system, and download them

View File

@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
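<p>With the library installed and the kernel module loaded, the SCTP
tests can be run like any other jtreg group. A minimal sketch, assuming
the SCTP tests live under <code>com/sun/nio/sctp</code> in the
<code>jdk</code> test suite:</p>
<pre><code>make test TEST=&quot;jtreg:com/sun/nio/sctp&quot;</code></pre>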
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
### SCTP Tests
The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.
For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:
```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```
For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:
```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```
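With the library installed and the kernel module loaded, the SCTP tests can be
run like any other jtreg group. A minimal sketch, assuming the SCTP tests live
under `com/sun/nio/sctp` in the `jdk` test suite:
```
make test TEST="jtreg:com/sun/nio/sctp"
```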
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -972,6 +972,10 @@ define SetupRunJtregTestBody
JTREG_AUTO_PROBLEM_LISTS += ProblemList-enable-preview.txt
endif
ifneq ($$(findstring -XX:+UseCompactObjectHeaders, $$(JTREG_ALL_OPTIONS)), )
JTREG_AUTO_PROBLEM_LISTS += ProblemList-coh.txt
endif
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
# Accept both absolute paths as well as relative to the current test root.

View File

@ -42,12 +42,12 @@ m4_include([lib-tests.m4])
AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES],
[
# Check if X11 is needed
if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
# No X11 support on windows or macosx
if test "x$OPENJDK_TARGET_OS" = xwindows ||
test "x$OPENJDK_TARGET_OS" = xmacosx ||
test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
NEEDS_LIB_X11=false
else
# All other instances need X11, even if building headless only, libawt still
# needs X11 headers.
# All other instances need X11 for libawt.
NEEDS_LIB_X11=true
fi

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
* <pre>
* &commat;jls section-number description
* &commat;jls chapter.section description
* &commat;jls preview-feature-chapter.section description
* </pre>
*
* For example:
*
* <pre>
* &commat;jls 3.4 Line Terminators
* &commat;jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
* </pre>
*
* will produce the following HTML, depending on the file containing
@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* <pre>{@code
* <dt>See <i>Java Language Specification</i>:
* <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
* <dd><a href="../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
* 5.7.1 Exact Testing Conversions</a><sup class="preview-mark">
* <a href="../../specs/jls/jls-1.html#jls-1.5.1">PREVIEW</a></sup>
* }</pre>
*
* Copies of JLS and JVMS are expected to have been placed in the {@code specs}
* folder. These documents are not included in open-source repositories.
* In inline tags (note that the JLS/JVMS prefix must be written manually):
* <pre>
* JLS {&commat;jls 3.4}
* </pre>
*
* produces (note the section sign and no trailing dot):
* <pre>
* JLS <a href="../../specs/jls/jls-3.html#jls-3.4">§3.4</a>
* </pre>
*
* Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
* been placed in the {@code specs} folder. These documents are not included
* in open-source repositories.
*/
public class JSpec implements Taglet {
@ -87,9 +103,9 @@ public class JSpec implements Taglet {
}
}
private String tagName;
private String specTitle;
private String idPrefix;
private final String tagName;
private final String specTitle;
private final String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@ -98,7 +114,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
/**
* Returns the set of locations in which the tag may be used.
@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
// preview-feature-4.6 is preview-feature-, 4, .6
String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section);
String url = preview == null ?
String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section) :
String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section, preview);
var literal = expand(contents).trim();
var prefix = (preview == null ? "" : preview) + chapter + section;
if (literal.startsWith(prefix)) {
var hasFullTitle = literal.length() > prefix.length();
if (hasFullTitle) {
// Drop the preview identifier
literal = chapter + section + literal.substring(prefix.length());
} else {
// No section sign if the tag refers to a chapter, like {@jvms 4}
String sectionSign = section.isEmpty() ? "" : "§";
// Change whole text to "§chapter.x" in inline tags.
literal = sectionSign + chapter + section;
}
}
sb.append("<a href=\"")
.append(url)
.append("\">")
.append(expand(contents))
.append(literal)
.append("</a>");
if (preview != null) {
// Add PREVIEW superscript that links to JLS/JVMS 1.5.1
// "Restrictions on the Use of Preview Features"
// Similar to how APIs link to the Preview info box warning
var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, "1", ".5.1");
sb.append("<sup class=\"preview-mark\"><a href=\"")
.append(sectionLink)
.append("\">PREVIEW</a></sup>");
}
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("<br>");
}

View File

@ -88,6 +88,10 @@ LIBAWT_EXTRA_HEADER_DIRS := \
LIBAWT_CFLAGS := -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES -DMLIB_NO_LIBSUNMATH
ifeq ($(ENABLE_HEADLESS_ONLY), true)
LIBAWT_CFLAGS += -DHEADLESS
endif
ifeq ($(call isTargetOs, windows), true)
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE -DMLIB_OS64BIT
LIBAWT_RCFLAGS ?= -I$(TOPDIR)/src/java.base/windows/native/launcher/icons
@ -167,11 +171,18 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
LIBAWT_HEADLESS_EXCLUDE_FILES := \
GLXGraphicsConfig.c \
GLXSurfaceData.c \
X11PMBlitLoops.c \
X11Renderer.c \
X11SurfaceData.c \
#
LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
$(LIBAWT_DEFAULT_HEADER_DIRS) \
common/awt/debug \
common/font \
common/java2d/opengl \
java.base:libjvm \
#
@ -191,7 +202,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
EXCLUDES := medialib, \
EXCLUDES := medialib opengl, \
EXCLUDE_FILES := $(LIBAWT_HEADLESS_EXCLUDE_FILES), \
ONLY_EXPORTED := $(LIBAWT_HEADLESS_ONLY_EXPORTED), \
OPTIMIZATION := LOW, \
CFLAGS := -DHEADLESS=true $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ $(eval $(call SetupJdkExecutable, BUILD_JPACKAGEAPPLAUNCHER, \
SRC := applauncher, \
EXTRA_SRC := common, \
INCLUDE_FILES := $(JPACKAGEAPPLAUNCHER_INCLUDE_FILES), \
OPTIMIZATION := LOW, \
OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_LinuxPackage.c := format-nonliteral, \
DISABLED_WARNINGS_clang_Log.cpp := unused-const-variable, \
@ -91,7 +91,7 @@ ifeq ($(call isTargetOs, linux), true)
common, \
EXCLUDE_FILES := LinuxLauncher.c LinuxPackage.c, \
LINK_TYPE := C++, \
OPTIMIZATION := LOW, \
OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_gcc_Log.cpp := unused-const-variable, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_tstrings.cpp := format-nonliteral, \

View File

@ -1,264 +0,0 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This source code is provided to illustrate the usage of a given feature
* or technique and has been deliberately simplified. Additional steps
* required for a production-quality application, such as security checks,
* input validation and proper error handling, might not be present in
* this sample code.
*/
import java.util.EventObject;
import java.util.List;
import javax.swing.JTable;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableCellEditor;
import javax.swing.table.TableCellRenderer;
import javax.swing.table.TableColumn;
/**
* The OldJTable is an unsupported class containing some methods that were
* deleted from the JTable between releases 0.6 and 0.7
*/
@SuppressWarnings("serial")
public class OldJTable extends JTable
{
/*
* A new convenience method returning the index of the column in the
* co-ordinate space of the view.
*/
public int getColumnIndex(Object identifier) {
return getColumnModel().getColumnIndex(identifier);
}
//
// Methods deleted from the JTable because they only work with the
// DefaultTableModel.
//
public TableColumn addColumn(Object columnIdentifier, int width) {
return addColumn(columnIdentifier, width, null, null, null);
}
public TableColumn addColumn(Object columnIdentifier, List<?> columnData) {
return addColumn(columnIdentifier, -1, null, null, columnData);
}
// Override the new JTable implementation - it will not add a column to the
// DefaultTableModel.
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
return addColumn(columnIdentifier, width, renderer, editor, null);
}
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor, List<?> columnData) {
checkDefaultTableModel();
// Set up the model side first
DefaultTableModel m = (DefaultTableModel)getModel();
m.addColumn(columnIdentifier, columnData.toArray());
// The column will have been added to the end, so the index of the
// column in the model is the last element.
TableColumn newColumn = new TableColumn(
m.getColumnCount()-1, width, renderer, editor);
super.addColumn(newColumn);
return newColumn;
}
// Not possible to make this work the same way ... change it so that
// it does not delete columns from the model.
public void removeColumn(Object columnIdentifier) {
super.removeColumn(getColumn(columnIdentifier));
}
public void addRow(Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData);
}
public void addRow(List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData.toArray());
}
public void removeRow(int rowIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).removeRow(rowIndex);
}
public void moveRow(int startIndex, int endIndex, int toIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
}
public void insertRow(int rowIndex, Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
}
public void insertRow(int rowIndex, List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
}
public void setNumRows(int newSize) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setNumRows(newSize);
}
public void setDataVector(Object[][] newData, List<?> columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(
newData, columnIds.toArray());
}
public void setDataVector(Object[][] newData, Object[] columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
}
protected void checkDefaultTableModel() {
if(!(dataModel instanceof DefaultTableModel))
throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
}
//
// Methods removed from JTable in the move from identifiers to ints.
//
public Object getValueAt(Object columnIdentifier, int rowIndex) {
return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
}
public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
}
public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
}
public boolean editColumnRow(Object identifier, int row) {
return super.editCellAt(row, getColumnIndex(identifier));
}
public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
moveColumn(getColumnIndex(columnIdentifier),
getColumnIndex(targetColumnIdentifier));
}
public boolean isColumnSelected(Object identifier) {
return isColumnSelected(getColumnIndex(identifier));
}
public TableColumn addColumn(int modelColumn, int width) {
return addColumn(modelColumn, width, null, null);
}
public TableColumn addColumn(int modelColumn) {
return addColumn(modelColumn, 75, null, null);
}
/**
* Creates a new column with <I>modelColumn</I>, <I>width</I>,
* <I>renderer</I>, and <I>editor</I> and adds it to the end of
* the JTable's array of columns. This method also retrieves the
* name of the column using the model's <I>getColumnName(modelColumn)</I>
* method, and sets both the header value and the identifier
* for this TableColumn accordingly.
* <p>
* The <I>modelColumn</I> is the index of the column in the model which
* will supply the data for this column in the table. This, like the
* <I>columnIdentifier</I> in previous releases, does not change as the
* columns are moved in the view.
* <p>
* For the rest of the JTable API, and all of its associated classes,
* columns are referred to in the co-ordinate system of the view, the
* index of the column in the model is kept inside the TableColumn
* and is used only to retrieve the information from the appropriate
* column in the model.
* <p>
*
* @param modelColumn The index of the column in the model
* @param width The new column's width. Or -1 to use
* the default width
* @param renderer The renderer used with the new column.
* Or null to use the default renderer.
* @param editor The editor used with the new column.
* Or null to use the default editor.
*/
public TableColumn addColumn(int modelColumn, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
TableColumn newColumn = new TableColumn(
modelColumn, width, renderer, editor);
addColumn(newColumn);
return newColumn;
}
//
// Methods that had their arguments switched.
//
// These won't work with the new table package.
/*
public Object getValueAt(int columnIndex, int rowIndex) {
return super.getValueAt(rowIndex, columnIndex);
}
public boolean isCellEditable(int columnIndex, int rowIndex) {
return super.isCellEditable(rowIndex, columnIndex);
}
public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
super.setValueAt(aValue, rowIndex, columnIndex);
}
*/
public boolean editColumnRow(int columnIndex, int rowIndex) {
return super.editCellAt(rowIndex, columnIndex);
}
public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
return super.editCellAt(rowIndex, columnIndex, e);
}
} // End Of Class OldJTable

View File

@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -201,6 +201,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@ -383,6 +385,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@ -4218,6 +4222,224 @@ instruct reduce_minD_masked(vRegD dst, vRegD dsrc, vReg vsrc, pRegGov pg) %{
ins_pipe(pipe_slow);
%}
// -------------------- Vector reduction unsigned min/max ----------------------
// reduction uminI
instruct reduce_uminI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vReg tmp, rFlagsReg cr) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction uminL
instruct reduce_uminL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, KILL cr);
format %{ "reduce_uminL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
ins_encode %{
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
/* vector_length_in_bytes */ 16, fnoreg);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umin - predicated
instruct reduce_uminI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 &&
(Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umaxI
instruct reduce_umaxI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vReg tmp, rFlagsReg cr) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umaxL
instruct reduce_umaxL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, KILL cr);
format %{ "reduce_umaxL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
ins_encode %{
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
/* vector_length_in_bytes */ 16, fnoreg);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umax - predicated
instruct reduce_umaxI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 &&
(Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -191,6 +191,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@ -373,6 +375,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@ -2505,6 +2509,32 @@ REDUCE_MAXMIN_INT_PREDICATE(min, L, iRegL, MinReductionV)
REDUCE_MAXMIN_FP_PREDICATE(min, F, fsrc, MinReductionV, sve_fminv, fmins)
REDUCE_MAXMIN_FP_PREDICATE(min, D, dsrc, MinReductionV, sve_fminv, fmind)
// -------------------- Vector reduction unsigned min/max ----------------------
// reduction uminI
REDUCE_MAXMIN_I_NEON(umin, UMinReductionV)
REDUCE_MAXMIN_I_SVE(umin, UMinReductionV)
// reduction uminL
REDUCE_MAXMIN_L_NEON(umin, UMinReductionV)
REDUCE_MAXMIN_L_SVE(umin, UMinReductionV)
// reduction umin - predicated
REDUCE_MAXMIN_INT_PREDICATE(umin, I, iRegIorL2I, UMinReductionV)
REDUCE_MAXMIN_INT_PREDICATE(umin, L, iRegL, UMinReductionV)
// reduction umaxI
REDUCE_MAXMIN_I_NEON(umax, UMaxReductionV)
REDUCE_MAXMIN_I_SVE(umax, UMaxReductionV)
// reduction umaxL
REDUCE_MAXMIN_L_NEON(umax, UMaxReductionV)
REDUCE_MAXMIN_L_SVE(umax, UMaxReductionV)
// reduction umax - predicated
REDUCE_MAXMIN_INT_PREDICATE(umax, I, iRegIorL2I, UMaxReductionV)
REDUCE_MAXMIN_INT_PREDICATE(umax, L, iRegL, UMaxReductionV)
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2658,6 +2658,8 @@ template<typename R, typename... Rx>
INSN(uminv, 1, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(smaxp, 0, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sminp, 0, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(umaxp, 1, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(uminp, 1, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sqdmulh,0, 0b101101, false); // accepted arrangements: T4H, T8H, T2S, T4S
INSN(shsubv, 0, 0b001001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
@ -3490,7 +3492,9 @@ public:
INSN(sve_sub, 0b00000100, 0b000001000); // vector sub
INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
INSN(sve_umax, 0b00000100, 0b001001000); // unsigned maximum vectors
INSN(sve_umaxv, 0b00000100, 0b001001001); // unsigned maximum reduction to scalar
INSN(sve_umin, 0b00000100, 0b001011000); // unsigned minimum vectors
INSN(sve_uminv, 0b00000100, 0b001011001); // unsigned minimum reduction to scalar
#undef INSN
// SVE floating-point arithmetic - predicate
@ -4325,6 +4329,7 @@ public:
#undef INSN
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
}
// Stack overflow checking

View File

@ -1218,43 +1218,11 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv) {
// Given a profile data offset, generate an Address which points to
// the corresponding slot in mdo->data().
// Clobbers rscratch2.
auto slot_at = [=](ByteSize offset) -> Address {
return __ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, offset),
LogBytesPerWord);
};
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
__ cmp(recv, rscratch1);
__ br(Assembler::NE, next_test);
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
DataLayout::counter_increment);
__ b(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
__ ldr(rscratch1, recv_addr);
__ cbnz(rscratch1, next_test);
__ str(recv, recv_addr);
__ mov(rscratch1, DataLayout::counter_increment);
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
__ b(*update_done);
__ bind(next_test);
}
int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
__ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@ -1316,14 +1284,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(obj, *obj_is_null);
}
@ -1430,13 +1393,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(value, done);
}
@ -2540,13 +2499,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
// dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@ -2554,36 +2509,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == nullptr) {
__ mov_metadata(rscratch1, known_klass->constant_encoding());
Address recv_addr =
__ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
LogBytesPerWord);
__ str(rscratch1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
// Receiver type is not found in profile data.
// Fall back to runtime helper to handle the rest at runtime.
__ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);

View File

@ -50,9 +50,8 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
void type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1960,50 +1960,76 @@ void C2_MacroAssembler::neon_reduce_logical(int opc, Register dst, BasicType bt,
BLOCK_COMMENT("} neon_reduce_logical");
}
// Vector reduction min/max for integral type with ASIMD instructions.
// Helper function to decode min/max reduction operation properties
void C2_MacroAssembler::decode_minmax_reduction_opc(int opc, bool* is_min,
bool* is_unsigned,
Condition* cond) {
switch(opc) {
case Op_MinReductionV:
*is_min = true; *is_unsigned = false; *cond = LT; break;
case Op_MaxReductionV:
*is_min = false; *is_unsigned = false; *cond = GT; break;
case Op_UMinReductionV:
*is_min = true; *is_unsigned = true; *cond = LO; break;
case Op_UMaxReductionV:
*is_min = false; *is_unsigned = true; *cond = HI; break;
default:
ShouldNotReachHere();
}
}
// Vector reduction min/max/umin/umax for integral type with ASIMD instructions.
// Note: vtmp is not used and is expected to be fnoreg for the T_LONG case.
// Clobbers: rscratch1, rflags
void C2_MacroAssembler::neon_reduce_minmax_integral(int opc, Register dst, BasicType bt,
Register isrc, FloatRegister vsrc,
unsigned vector_length_in_bytes,
FloatRegister vtmp) {
assert(opc == Op_MinReductionV || opc == Op_MaxReductionV, "unsupported");
assert(opc == Op_MinReductionV || opc == Op_MaxReductionV ||
opc == Op_UMinReductionV || opc == Op_UMaxReductionV, "unsupported");
assert(vector_length_in_bytes == 8 || vector_length_in_bytes == 16, "unsupported");
assert(bt == T_BYTE || bt == T_SHORT || bt == T_INT || bt == T_LONG, "unsupported");
assert_different_registers(dst, isrc);
bool isQ = vector_length_in_bytes == 16;
bool is_min = opc == Op_MinReductionV;
bool is_min;
bool is_unsigned;
Condition cond;
decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
BLOCK_COMMENT("neon_reduce_minmax_integral {");
if (bt == T_LONG) {
assert(vtmp == fnoreg, "should be");
assert(isQ, "should be");
umov(rscratch1, vsrc, D, 0);
cmp(isrc, rscratch1);
csel(dst, isrc, rscratch1, is_min ? LT : GT);
csel(dst, isrc, rscratch1, cond);
umov(rscratch1, vsrc, D, 1);
cmp(dst, rscratch1);
csel(dst, dst, rscratch1, is_min ? LT : GT);
csel(dst, dst, rscratch1, cond);
} else {
SIMD_Arrangement size = esize2arrangement((unsigned)type2aelembytes(bt), isQ);
if (size == T2S) {
is_min ? sminp(vtmp, size, vsrc, vsrc) : smaxp(vtmp, size, vsrc, vsrc);
// For T2S (2x32-bit elements), use pairwise instructions because
// uminv/umaxv/sminv/smaxv don't support arrangement 2S.
neon_minmaxp(is_unsigned, is_min, vtmp, size, vsrc, vsrc);
} else {
is_min ? sminv(vtmp, size, vsrc) : smaxv(vtmp, size, vsrc);
// For other sizes, use reduction to scalar instructions.
neon_minmaxv(is_unsigned, is_min, vtmp, size, vsrc);
}
if (bt == T_INT) {
umov(dst, vtmp, S, 0);
} else if (is_unsigned) {
umov(dst, vtmp, elemType_to_regVariant(bt), 0);
} else {
smov(dst, vtmp, elemType_to_regVariant(bt), 0);
}
cmpw(dst, isrc);
cselw(dst, dst, isrc, is_min ? LT : GT);
cselw(dst, dst, isrc, cond);
}
BLOCK_COMMENT("} neon_reduce_minmax_integral");
}
// Vector reduction for integral type with SVE instruction.
// Supported operations are Add, And, Or, Xor, Max, Min.
// Supported operations are Add, And, Or, Xor, Max, Min, UMax, UMin.
// rflags would be clobbered if opc is one of the min/max reductions (signed or unsigned).
void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt, Register src1,
FloatRegister src2, PRegister pg, FloatRegister tmp) {
@ -2075,35 +2101,27 @@ void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt,
}
break;
}
case Op_MaxReductionV: {
sve_smaxv(tmp, size, pg, src2);
if (bt == T_INT || bt == T_LONG) {
case Op_MaxReductionV:
case Op_MinReductionV:
case Op_UMaxReductionV:
case Op_UMinReductionV: {
bool is_min;
bool is_unsigned;
Condition cond;
decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
sve_minmaxv(is_unsigned, is_min, tmp, size, pg, src2);
// Move result from vector to general register
if (is_unsigned || bt == T_INT || bt == T_LONG) {
umov(dst, tmp, size, 0);
} else {
smov(dst, tmp, size, 0);
}
if (bt == T_LONG) {
cmp(dst, src1);
csel(dst, dst, src1, Assembler::GT);
csel(dst, dst, src1, cond);
} else {
cmpw(dst, src1);
cselw(dst, dst, src1, Assembler::GT);
}
break;
}
case Op_MinReductionV: {
sve_sminv(tmp, size, pg, src2);
if (bt == T_INT || bt == T_LONG) {
umov(dst, tmp, size, 0);
} else {
smov(dst, tmp, size, 0);
}
if (bt == T_LONG) {
cmp(dst, src1);
csel(dst, dst, src1, Assembler::LT);
} else {
cmpw(dst, src1);
cselw(dst, dst, src1, Assembler::LT);
cselw(dst, dst, src1, cond);
}
break;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,37 @@
void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
// Helper functions for min/max reduction operations
void decode_minmax_reduction_opc(int opc, bool* is_min, bool* is_unsigned, Condition* cond);
void neon_minmaxp(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_Arrangement size, FloatRegister src1, FloatRegister src2) {
auto m = is_unsigned ? (is_min ? &Assembler::uminp : &Assembler::umaxp)
: (is_min ? &Assembler::sminp : &Assembler::smaxp);
(this->*m)(dst, size, src1, src2);
}
// Typedefs used to disambiguate overloaded member functions.
typedef void (Assembler::*neon_reduction2)
(FloatRegister, SIMD_Arrangement, FloatRegister);
void neon_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_Arrangement size, FloatRegister src) {
auto m = is_unsigned ? (is_min ? (neon_reduction2)&Assembler::uminv
: (neon_reduction2)&Assembler::umaxv)
: (is_min ? &Assembler::sminv
: &Assembler::smaxv);
(this->*m)(dst, size, src);
}
void sve_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_RegVariant size, PRegister pg, FloatRegister src) {
auto m = is_unsigned ? (is_min ? &Assembler::sve_uminv : &Assembler::sve_umaxv)
: (is_min ? &Assembler::sve_sminv : &Assembler::sve_smaxv);
(this->*m)(dst, size, pg, src);
}
void select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, unsigned vector_length_in_bytes);

View File

@ -209,6 +209,10 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
bs_asm->increment_patching_epoch();
}
// Enable WXWrite: the function is called directly from nmethod_entry_barrier
// stub.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
NativeNMethodBarrier barrier(nm);
barrier.set_value(value, bit_mask);
}

View File

@ -85,26 +85,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -358,20 +348,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@ -394,13 +384,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@ -409,20 +399,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
rscratch1 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}
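The rewritten store_at makes the barrier schedule explicit: pre-barrier on the previous value, the store itself, then the card barrier on the store address. A GC-agnostic C++ sketch of the same shape; the predicates and enqueue/dirty helpers are stand-ins, not HotSpot API:

#include <cstdio>

static bool need_satb_barrier() { return true; }  // stand-ins for the
static bool need_card_barrier() { return true; }  // decorator-driven checks
static void satb_enqueue(void* pre_val) { std::printf("satb %p\n", pre_val); }
static void dirty_card_for(void* addr)  { std::printf("card %p\n", addr); }

template <typename T>
void barriered_store(T** slot, T* new_val) {
  if (need_satb_barrier()) {
    satb_enqueue(*slot);                 // 1: SATB wants the previous value
  }
  *slot = new_val;                       // 2: the store itself
  if (need_card_barrier() && new_val != nullptr) {
    dirty_card_for(slot);                // 3: card barrier wants the address
  }
}

int main() {
  int a = 1, b = 2;
  int* field = &a;
  barriered_store(&field, &b);
}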

View File

@ -40,23 +40,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -240,15 +240,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
// r2, r5
// r2
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
profile_typecheck(r2, Rsub_klass); // blows r2
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@ -991,7 +990,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1009,7 +1007,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@ -1018,131 +1016,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2, int start_row,
Label& done) {
if (TypeProfileWidth == 0) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} else {
record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
// Test this row for both the item and for null.
// Take any of three different outcomes:
// 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
for (int row = start_row; row <= last_row; row++) {
Label next_test;
bool test_for_null_also = (row == start_row);
// See if the item is item[n].
int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test);
// (Reg2 now contains the item from the CallData.)
// The item is item[n]. Increment count[n].
int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
b(done);
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on item[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
cbz(reg2, found_null);
// Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(done);
bind(found_null);
break;
}
// Since null is rare, make it be the branch-taken case.
cbz(reg2, found_null);
// Put all the "Case 3" tests here.
record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn);
// Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot.
bind(found_null);
}
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(item_count_offset_fn(start_row));
mov(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) {
b(done);
}
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp, Register reg2) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
bind (done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@ -1200,7 +1073,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1213,7 +1086,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);

View File

@ -273,15 +273,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done);
void record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@ -295,11 +286,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -473,6 +473,7 @@ address MacroAssembler::target_addr_for_insn(address insn_addr) {
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
return RelocActions<Patcher>::run(insn_addr, target);
}
@ -481,6 +482,8 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
unsigned insn = *(unsigned*)insn_addr;
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// OOPs are either narrow (32 bits) or wide (48 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
@ -510,6 +513,8 @@ int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
return 2 * NativeInstruction::instruction_size;
@ -2118,6 +2123,161 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
// Handle the receiver type profile update given the "recv" klass.
//
// Normally updates the ReceiverData (RD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in RD, updates
// the polymorphic counter.
//
// This code is expected to run from either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
//
void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
assert_different_registers(recv, mdp, rscratch1, rscratch2);
int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
int poly_count_offset = in_bytes(CounterData::count_offset());
int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
// Adjust for MDP offsets.
base_receiver_offset += mdp_offset;
end_receiver_offset += mdp_offset;
poly_count_offset += mdp_offset;
#ifdef ASSERT
// We are about to walk the MDO slots without asking for offsets.
// Check that our math hits all the right spots.
for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
int offset = base_receiver_offset + receiver_step*c;
int count_offset = offset + receiver_to_count_step;
assert(offset == real_recv_offset, "receiver slot math");
assert(count_offset == real_count_offset, "receiver count math");
}
int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
assert(poly_count_offset == real_poly_count_offset, "poly counter math");
#endif
// Corner case: no profile table. Increment poly counter and exit.
if (ReceiverTypeData::row_limit() == 0) {
increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
return;
}
Register offset = rscratch2;
Label L_loop_search_receiver, L_loop_search_empty;
Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
// The code here recognizes three major cases:
// A. Fastest: receiver found in the table
// B. Fast: no receiver in the table, and the table is full
// C. Slow: no receiver in the table, free slots in the table
//
// Case A performance matters most, as perfectly-behaved code ends up there,
// especially with larger TypeProfileWidth. Case B performance is important as
// well: this is where the bulk of the code lands for ordinarily megamorphic
// cases. Case C performance is not essential; its job is to deal with
// installation races, so we optimize for code density instead. Case C must
// make sure that receiver rows are claimed only once. This guarantees we never
// overwrite a row for another receiver and never duplicate receivers in the
// list, keeping the profile type-accurate.
//
// It is very tempting to handle these cases in a single loop and claim the
// first free slot without checking the rest of the table. But profiling code
// should tolerate free slots in the table, as class unloading can clear them.
// After such cleanup, the receiver we need might be _after_ the free slot.
// Therefore, we need to let at least one full scan complete before trying to
// install new slots. Splitting the code into several tight loops also
// helpfully optimizes cases A and B.
//
// This code is effectively:
//
// restart:
// // Fastest: receiver is already installed
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == recv) goto found_recv(i);
// }
//
// // Fast: no receiver, but profile is full
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == null) goto found_null(i);
// }
// goto polymorphic
//
// // Slow: try to install receiver
// found_null(i):
// CAS(&receiver(i), null, recv);
// goto restart
//
// polymorphic:
// count++;
// return
//
// found_recv(i):
// *receiver_count(i)++
//
bind(L_restart);
// Fastest: receiver is already installed
mov(offset, base_receiver_offset);
bind(L_loop_search_receiver);
ldr(rscratch1, Address(mdp, offset));
cmp(rscratch1, recv);
br(Assembler::EQ, L_found_recv);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_receiver);
// Fast: no receiver, but profile is full
mov(offset, base_receiver_offset);
bind(L_loop_search_empty);
ldr(rscratch1, Address(mdp, offset));
cbz(rscratch1, L_found_empty);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_empty);
b(L_polymorphic);
// Slow: try to install receiver
bind(L_found_empty);
// Atomically swing receiver slot: null -> recv.
//
// The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
// is used to hold the destination address. This is safe because the
// offset is no longer needed after the address is computed.
lea(rscratch2, Address(mdp, offset));
cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
/*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
// CAS success means the slot now has the receiver we want. CAS failure means
// something had claimed the slot concurrently: it can be the same receiver we want,
// or something else. Since this is a slow path, we can optimize for code density,
// and just restart the search from the beginning.
b(L_restart);
// Counter updates:
// Increment polymorphic counter instead of receiver slot.
bind(L_polymorphic);
mov(offset, poly_count_offset);
b(L_count_update);
// Found a receiver, convert its slot offset to corresponding count offset.
bind(L_found_recv);
add(offset, offset, receiver_to_count_step);
bind(L_count_update);
increment(Address(mdp, offset), DataLayout::counter_increment);
}
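The same restart/scan/claim protocol, modeled in portable C++ with std::atomic so it can be read (and stress-tested) apart from the assembler; the row shape and types are simplified stand-ins for the MDO layout:

#include <atomic>
#include <cstdint>

constexpr int kRows = 2;  // stands in for ReceiverTypeData::row_limit()

struct Row { std::atomic<uintptr_t> recv{0}; uint64_t count{0}; };
struct Profile { Row rows[kRows]; uint64_t poly_count{0}; };

// Counts are plain non-atomic updates, matching the "for speed, counter
// updates are not atomic" note above; only row claiming uses CAS.
void profile_receiver(Profile& p, uintptr_t recv) {
  for (;;) {                                          // restart:
    for (Row& r : p.rows) {                           // fastest: already installed
      if (r.recv.load(std::memory_order_relaxed) == recv) { r.count++; return; }
    }
    Row* empty = nullptr;                             // fast: find a free row
    for (Row& r : p.rows) {
      if (r.recv.load(std::memory_order_relaxed) == 0) { empty = &r; break; }
    }
    if (empty == nullptr) { p.poly_count++; return; } // table full: polymorphic
    uintptr_t expected = 0;                           // slow: claim the row
    empty->recv.compare_exchange_weak(expected, recv, std::memory_order_relaxed);
    // On failure someone claimed the slot concurrently; just restart.
  }
}

int main() {
  Profile p;
  profile_receiver(p, 0x1000);
  profile_receiver(p, 0x1000);  // second call hits the fast path
}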
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
@ -5606,12 +5766,11 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Strictly speaking the byte_map_base isn't an address at all, and it might
// Strictly speaking the card table base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
mov(reg, (uint64_t)byte_map_base);
mov(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {
@ -5782,6 +5941,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@ -5848,6 +6010,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
@ -6259,10 +6424,14 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-

View File

@ -1122,6 +1122,8 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {

View File

@ -133,7 +133,6 @@ void NativeMovConstReg::verify() {
intptr_t NativeMovConstReg::data() const {
// das(uint64_t(instruction_address()),2);
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
if (maybe_cpool_ref(instruction_address())) {
return *(intptr_t*)addr;
@ -144,6 +143,7 @@ intptr_t NativeMovConstReg::data() const {
void NativeMovConstReg::set_data(intptr_t x) {
if (maybe_cpool_ref(instruction_address())) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
*(intptr_t*)addr = x;
} else {
@ -350,8 +350,6 @@ bool NativeInstruction::is_stop() {
//-------------------------------------------------------------------
void NativeGeneralJump::verify() { }
// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
ShouldNotCallThis();

View File

@ -90,16 +90,18 @@ protected:
s_char sbyte_at(int offset) const { return *(s_char*)addr_at(offset); }
u_char ubyte_at(int offset) const { return *(u_char*)addr_at(offset); }
jint int_at(int offset) const { return *(jint*)addr_at(offset); }
juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
address ptr_at(int offset) const { return *(address*)addr_at(offset); }
oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
jint int_at(int offset) const { return *(jint*)addr_at(offset); }
juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
address ptr_at(int offset) const { return *(address*)addr_at(offset); }
oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
void set_ptr_at(int offset, address ptr) { *(address*)addr_at(offset) = ptr; }
void set_oop_at(int offset, oop o) { *(oop*)addr_at(offset) = o; }
#define MACOS_WX_WRITE MACOS_AARCH64_ONLY(os::thread_wx_enable_write())
void set_char_at(int offset, char c) { MACOS_WX_WRITE; *addr_at(offset) = (u_char)c; }
void set_int_at(int offset, jint i) { MACOS_WX_WRITE; *(jint*)addr_at(offset) = i; }
void set_uint_at(int offset, jint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; }
void set_ptr_at(int offset, address ptr) { MACOS_WX_WRITE; *(address*)addr_at(offset) = ptr; }
void set_oop_at(int offset, oop o) { MACOS_WX_WRITE; *(oop*)addr_at(offset) = o; }
#undef MACOS_WX_WRITE
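For context: on macOS/AArch64 the write-enable call above flips the calling thread's MAP_JIT pages from execute to write. A minimal user-space sketch of the same W^X dance (assumes Apple silicon; a hardened-runtime binary additionally needs the allow-jit entitlement):

#include <libkern/OSCacheControl.h>
#include <pthread.h>
#include <sys/mman.h>
#include <cstdint>
#include <cstring>

int main() {
  // One RWX page under MAP_JIT: writable and executable, never both at once.
  void* page = mmap(nullptr, 16384, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  pthread_jit_write_protect_np(0);               // this thread: write allowed
  const uint32_t ret_insn = 0xd65f03c0;          // AArch64 "ret"
  std::memcpy(page, &ret_insn, sizeof(ret_insn));
  pthread_jit_write_protect_np(1);               // this thread: execute allowed
  sys_icache_invalidate(page, sizeof(ret_insn)); // drop stale icache lines
  reinterpret_cast<void (*)()>(page)();          // run the one-instruction stub
}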
void wrote(int offset);
@ -380,7 +382,6 @@ public:
void set_jump_destination(address dest);
static void replace_mt_safe(address instr_addr, address code_buffer);
static void verify();
};
inline NativeGeneralJump* nativeGeneralJump_at(address address) {

View File

@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
// (parsedLength or (parsedLength - 48) must be divisible by 64.)
// we assume that parsed and condensed are allocated such that for
// n = (parsedLength + 63) / 64
// n blocks of 96 bytes of input can be processed, i.e.
// index + n * 96 <= condensed.length and
// n * 64 <= parsed.length
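// For example, parsedLength = 112 gives n = (112 + 63) / 64 = 2, so the
// caller must guarantee index + 192 <= condensed.length and
// 128 <= parsed.length, even though only 112 parsed values are requested.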
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
// parsed (short[112 or 256]) = c_rarg2
// parsedLength (112 or 256) = c_rarg3
// parsed (short[]) = c_rarg2
// parsedLength = c_rarg3
address generate_kyber12To16() {
Label L_F00, L_loop, L_end;
Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
__ cmp(parsedLength, (u1)64);
__ br(Assembler::GE, L_loop);
__ cbz(parsedLength, L_end);
// if anything is left it should be a final 72 bytes of input
// i.e. a final 48 12-bit values. so we handle this by loading
// 48 bytes into all 16B lanes of front(vin) and only 24
// bytes into the lower 8B lane of back(vin)
vs_ld3_post(vs_front(vin), __ T16B, condensed);
vs_ld3(vs_back(vin), __ T8B, condensed);
// Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
// n.b. target elements 2 and 3 of va duplicate elements 4 and
// 5 and target element 2 of vb duplicates element 4.
__ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
__ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
__ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
__ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
// This time expand just the lower 8 lanes
__ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
__ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
__ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
// shift lo byte of copy 1 of the middle stripe into the high byte
__ shl(va[2], __ T8H, va[2], 8);
__ shl(va[3], __ T8H, va[3], 8);
__ shl(vb[2], __ T8H, vb[2], 8);
// expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
// vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
// int are in bit positions [4..11].
__ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
__ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
__ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
// mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
// shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
// copy2
__ andr(va[2], __ T16B, va[2], v31);
__ andr(va[3], __ T16B, va[3], v31);
__ ushr(va[4], __ T8H, va[4], 4);
__ ushr(va[5], __ T8H, va[5], 4);
__ andr(vb[2], __ T16B, vb[2], v31);
__ ushr(vb[4], __ T8H, vb[4], 4);
// sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
// hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
// n.b. ordering ensures: i) inputs are consumed before they are
// overwritten ii) order of 16-bit results across successive
// pairs of vectors in va and then lower half of vb reflects order
// of corresponding 12-bit inputs
__ addv(va[0], __ T8H, va[0], va[2]);
__ addv(va[2], __ T8H, va[1], va[3]);
__ addv(va[1], __ T8H, va[4], va[6]);
__ addv(va[3], __ T8H, va[5], va[7]);
__ addv(vb[0], __ T8H, vb[0], vb[2]);
__ addv(vb[1], __ T8H, vb[4], vb[6]);
// store 48 results interleaved as shorts
vs_st2_post(vs_front(va), __ T8H, parsed);
vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
__ BIND(L_end);
__ cmp(parsedLength, (u1)0);
__ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0
@ -11805,7 +11742,9 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_setMemory)) {
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
}
StubRoutines::aarch64::set_completed(); // Indicate that arraycopy and zero_blocks stubs are generated
}

View File

@ -3370,7 +3370,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
__ profile_virtual_call(r0, rlocals, r3);
__ profile_virtual_call(r0, rlocals);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@ -3500,7 +3500,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
__ profile_virtual_call(r3, r13, r19);
__ profile_virtual_call(r3, r13);
// Get declaring interface class from method, and itable index

View File

@ -201,16 +201,14 @@ void VM_Version::initialize() {
}
}
// Cortex A53
if (_cpu == CPU_ARM && model_is(0xd03)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A53)) {
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
}
// Cortex A73
if (_cpu == CPU_ARM && model_is(0xd09)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A73)) {
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
}
@ -220,16 +218,11 @@ void VM_Version::initialize() {
}
}
// Neoverse
// N1: 0xd0c
// N2: 0xd49
// N3: 0xd8e
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
model_is(0xd40) || model_is(0xd4f) ||
model_is(0xd8e) || model_is(0xd84))) {
if (_cpu == CPU_ARM &&
model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1,
CPU_MODEL_ARM_NEOVERSE_N2, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_N3, CPU_MODEL_ARM_NEOVERSE_V3,
CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@ -261,12 +254,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32, false);
}
// Neoverse
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM &&
(model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_V3, CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
@ -632,6 +622,22 @@ void VM_Version::initialize() {
check_virtualizations();
#ifdef __APPLE__
DefaultWXWriteMode = UseOldWX ? WXWrite : WXArmedForWrite;
if (TraceWXHealing) {
if (pthread_jit_write_protect_supported_np()) {
tty->print_cr("### TraceWXHealing is in use");
if (StressWXHealing) {
tty->print_cr("### StressWXHealing is in use");
}
} else {
tty->print_cr("WX Healing is not in use because MAP_JIT write protection "
"does not work on this system.");
}
}
#endif
// Sync SVE related CPU features with flags
if (UseSVE < 2) {
clear_feature(CPU_SVE2);

View File

@ -30,6 +30,8 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
#include <initializer_list>
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
@ -112,14 +114,26 @@ public:
CPU_APPLE = 'a',
};
enum Ampere_CPU_Model {
enum Ampere_CPU_Model {
CPU_MODEL_EMAG = 0x0, /* CPU implementer is CPU_AMCC */
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
};
};
enum ARM_CPU_Model {
CPU_MODEL_ARM_CORTEX_A53 = 0xd03,
CPU_MODEL_ARM_CORTEX_A73 = 0xd09,
CPU_MODEL_ARM_NEOVERSE_N1 = 0xd0c,
CPU_MODEL_ARM_NEOVERSE_V1 = 0xd40,
CPU_MODEL_ARM_NEOVERSE_N2 = 0xd49,
CPU_MODEL_ARM_NEOVERSE_V2 = 0xd4f,
CPU_MODEL_ARM_NEOVERSE_V3AE = 0xd83,
CPU_MODEL_ARM_NEOVERSE_V3 = 0xd84,
CPU_MODEL_ARM_NEOVERSE_N3 = 0xd8e,
};
#define CPU_FEATURE_FLAGS(decl) \
decl(FP, fp, 0) \
@ -181,6 +195,15 @@ enum Ampere_CPU_Model {
return _model == cpu_model || _model2 == cpu_model;
}
static bool model_is_in(std::initializer_list<int> cpu_models) {
for (const int& cpu_model : cpu_models) {
if (_model == cpu_model || _model2 == cpu_model) {
return true;
}
}
return false;
}
static bool is_zva_enabled() { return 0 <= _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");

View File

@ -67,9 +67,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_cardtable_loop, L_done;
@ -83,7 +81,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base());
__ mov_address(tmp, (address)ctbs->card_table_base_const());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@ -122,8 +120,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Load card table base address.
@ -140,7 +137,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
__ mov_address(card_table_base, (address)ct->byte_map_base());
__ mov_address(card_table_base, (address)ctbs->card_table_base_const());
}
// The 2nd part of the store check.
@ -170,8 +167,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.

View File

@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@ -2424,6 +2427,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);

View File

@ -923,6 +923,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }

View File

@ -664,3 +664,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
// Works for single and double precision floats.
// dst = (op1 cmp(cc) op2) ? src1 : src2;
// Unordered semantics are the same as for CmpF3Node/CmpD3Node, which implement the fcmpl/dcmpl bytecodes.
// Comparing unordered values gives the same result as when op1 is less than op2.
// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
// See operand cmpOp() for details.
bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
auto cmp = (Assembler::Condition)(cc & 3);
switch(cmp) {
case Assembler::Condition::equal:
// Use false_result if "unordered".
xscmpeqdp(tmp, op1, op2);
break;
case Assembler::Condition::greater:
// Use false_result if "unordered".
xscmpgtdp(tmp, op1, op2);
break;
case Assembler::Condition::less:
// Use true_result if "unordered".
xscmpgedp(tmp, op1, op2);
invert_cond = !invert_cond;
break;
default:
assert(false, "unsupported compare condition: %d", cc);
ShouldNotReachHere();
}
VectorSRegister true_result = invert_cond ? src2 : src1;
VectorSRegister false_result = invert_cond ? src1 : src2;
xxsel(dst, false_result, true_result, tmp);
}
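A scalar reference model of the selection semantics described above, handy for checking the unordered cases; the Cc enum and helper are illustrative only, not part of the patch:

#include <cassert>
#include <cmath>

enum class Cc { LT, LE, GT, GE, EQ, NE };

// dst = (op1 cc op2) ? src1 : src2, where an unordered compare (any NaN)
// behaves as if op1 < op2, matching the fcmpl/dcmpl convention above.
double cmovF_ref(Cc cc, double op1, double op2, double src1, double src2) {
  bool u = std::isunordered(op1, op2);
  bool cond = false;
  switch (cc) {
    case Cc::LT: cond = u || op1 <  op2; break;
    case Cc::LE: cond = u || op1 <= op2; break;
    case Cc::NE: cond = u || op1 != op2; break;
    case Cc::GT: cond = !u && op1 >  op2; break;
    case Cc::GE: cond = !u && op1 >= op2; break;
    case Cc::EQ: cond = !u && op1 == op2; break;
  }
  return cond ? src1 : src2;
}

int main() {
  double nan = std::nan("");
  assert(cmovF_ref(Cc::LT, nan, 1.0, 10.0, 20.0) == 10.0);  // unordered -> src1
  assert(cmovF_ref(Cc::GT, nan, 1.0, 10.0, 20.0) == 20.0);  // unordered -> src2
}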

View File

@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP

View File

@ -103,8 +103,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@ -117,7 +116,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
__ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
__ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@ -140,8 +139,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,

View File

@ -50,14 +50,14 @@
#define __ masm->
void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
__ block_comment("satb_write_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_write_barrier (shenandoahgc)");
__ block_comment("satb_barrier (shenandoahgc) {");
satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_barrier (shenandoahgc)");
}
}
@ -198,11 +198,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@ -574,13 +575,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@ -603,21 +604,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (is_reference_type(type)) {
if (ShenandoahSATBBarrier) {
satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
return;
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
// No need for post barrier if storing null
if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
store_check(masm, base, ind_or_offs, tmp1);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, base, ind_or_offs, tmp1);
}
}
@ -771,9 +784,6 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;

View File

@ -45,15 +45,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void store_check(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void card_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@ -85,10 +85,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
void satb_write_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,

View File

@ -64,12 +64,10 @@
return true;
}
// Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
// Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
// fsel doesn't accept a condition register as input, so this would be slightly different.
static int float_cmove_cost() { return ConditionalMoveLimit; }
// Suppress CMOVF for Power8 because there are no fast nodes.
static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched

View File

@ -3024,7 +3024,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
// use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@ -3099,7 +3098,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
// use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@ -6618,7 +6616,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@ -7293,7 +7290,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7313,7 +7309,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7326,6 +7321,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@ -8492,7 +8551,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -8551,7 +8609,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -10262,7 +10319,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10276,7 +10332,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10439,7 +10494,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10453,7 +10507,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -11080,7 +11133,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -775,7 +775,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
return stk;
}
#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
@ -913,7 +912,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
#endif // COMPILER2
int SharedRuntime::vector_calling_convention(VMRegPair *regs,
uint num_bits,
@ -2874,7 +2872,6 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer(name, 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
Label exec_mode_initialized;
int frame_size_in_words;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
@ -2886,6 +2883,9 @@ void SharedRuntime::generate_deopt_blob() {
const Register exec_mode_reg = R21_tmp1;
const address start = __ pc();
int exception_offset = 0;
int exception_in_tls_offset = 0;
int reexecute_offset = 0;
#if defined(COMPILER1) || defined(COMPILER2)
// --------------------------------------------------------------------------
@ -2925,7 +2925,7 @@ void SharedRuntime::generate_deopt_blob() {
// - R3_ARG1: exception oop
// - R4_ARG2: exception pc
int exception_offset = __ pc() - start;
exception_offset = __ pc() - start;
BLOCK_COMMENT("Prolog for exception case");
@ -2936,7 +2936,7 @@ void SharedRuntime::generate_deopt_blob() {
__ std(R4_ARG2, _abi0(lr), R1_SP);
// Vanilla deoptimization with an exception pending in exception_oop.
int exception_in_tls_offset = __ pc() - start;
exception_in_tls_offset = __ pc() - start;
// Push the "unpack frame".
// Save everything in sight.
@ -2949,8 +2949,6 @@ void SharedRuntime::generate_deopt_blob() {
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
// fall through
int reexecute_offset = 0;
#ifdef COMPILER1
__ b(exec_mode_initialized);
@ -3068,11 +3066,12 @@ void SharedRuntime::generate_deopt_blob() {
// Return to the interpreter entry point.
__ blr();
__ flush();
#else // COMPILER2
#else // !defined(COMPILER1) && !defined(COMPILER2)
__ unimplemented("deopt blob needed only with compiler");
int exception_offset = __ pc() - start;
#endif // COMPILER2
#endif
// Make sure all code is generated
__ flush();
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
reexecute_offset, first_frame_size_in_bytes / wordSize);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2813,10 +2813,14 @@ void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Regist
// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied. A successful
// copy will complete with the post-condition: 'res' == 'len', while an


@ -88,26 +88,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -376,21 +366,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@ -413,13 +403,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@ -428,20 +418,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
t0 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}
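For orientation, the shape of the SATB pre-barrier that satb_barrier() emits can be sketched in plain C++. This is a conceptual sketch assuming a simple enqueue hook, not the emitted assembly:

// If concurrent marking is active, log the value about to be
// overwritten so marking keeps its snapshot-at-the-beginning invariant.
static void satb_pre_barrier(void** field, bool marking_active,
                             void (*enqueue)(void*)) {
  if (!marking_active) return;   // fast path: no marking in progress
  void* prev = *field;           // previous value, read before the store
  if (prev != nullptr) {
    enqueue(prev);               // hand it to the concurrent marker
  }
}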


@ -41,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);


@ -5110,9 +5110,8 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
mv(reg, (uint64_t)byte_map_base);
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
mv(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {


@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
__ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
__ ld(x12, at_tos_p1()); // index
__ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
__ ld(x10, iaddress(x12, x10, _masm));
__ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
__ ld(x10, iaddress(x12, t0, _masm));
__ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {


@ -83,8 +83,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@ -105,7 +104,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
__ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@ -179,13 +178,12 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
__ load_absolute_address(tmp, (address)ct->byte_map_base());
__ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}
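The arithmetic these few instructions implement is simple; as a hedged C++ sketch (names illustrative, not HotSpot's):

#include <cstdint>

// The card for an address is one byte in the table at
// base + (addr >> card_shift); a post-barrier marks it dirty.
static void mark_card(volatile uint8_t* byte_map_base, uintptr_t store_addr,
                      unsigned card_shift, uint8_t dirty_val) {
  byte_map_base[store_addr >> card_shift] = dirty_val;
}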


@ -95,11 +95,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BarrierSet *bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
intptr_t disp = (intptr_t) ct->byte_map_base();
SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_loop, L_done;
const Register end = count;
@ -115,7 +111,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, disp);
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@ -128,10 +124,7 @@ __ BIND(L_done);
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
__ shrptr(obj, CardTable::card_shift());
@ -142,7 +135,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {


@ -174,24 +174,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -533,18 +523,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@ -575,41 +565,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// We do it regardless of precise because we need the registers
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
__ lea(tmp1, dst);
}
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (needs_pre_barrier) {
shenandoah_write_barrier_pre(masm /*masm*/,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
}
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
if (val != noreg) {
if (ShenandoahCardBarrier) {
store_check(masm, tmp1);
}
// Flatten object address right away for simplicity: likely needed by barriers
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
__ lea(tmp1, dst);
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp1);
}
}


@ -41,21 +41,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,


@ -6251,32 +6251,46 @@ void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src,
}
}
// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
// byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
// Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
//
// @IntrinsicCandidate
// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeISOArray0(
// byte[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeAsciiArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
//
// Query CPUID 0xD.19 for APX XSAVE offset
// Extended State Enumeration Sub-leaf 19 (APX)
// EAX = size of APX state (should be 128)
// EBX = offset in standard XSAVE format
//
__ movl(rax, 0xD);
__ movl(rcx, 19);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
__ movl(Address(rsi, 0), rax);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
__ movl(Address(rsi, 0), rbx);
UseAPX = save_apx;
__ bind(vector_save_restore);
//
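For reference, the same sub-leaf can be queried from ordinary C++ with the GCC/Clang <cpuid.h> helper. This is an illustrative sketch, not what the stub generator does (it emits the cpuid instruction directly):

#include <cpuid.h>
#include <cstdint>

// Leaf 0xD, sub-leaf 19 reports the APX extended-state component:
// EAX = size in bytes (expected 128), EBX = offset in the XSAVE area.
static bool query_apx_xstate(uint32_t* size, uint32_t* offset) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(0xD, 19, &eax, &ebx, &ecx, &edx)) {
    return false;                // CPUID leaf not supported
  }
  *size = eax;
  *offset = ebx;
  return eax != 0;               // zero size means no APX state
}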
@ -921,8 +935,9 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
(_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
_model == 0xCC || _model == 0xDD)) {
(supports_hybrid() ||
_model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
_model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}
@ -1137,6 +1152,10 @@ void VM_Version::get_processor_features() {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES_CTR intrinsics require UseAES flag to be enabled. AES_CTR intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (UseSSE > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
@ -1155,8 +1174,8 @@ void VM_Version::get_processor_features() {
if (!UseAESIntrinsics) {
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (supports_sse4_1()) {
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
@ -1176,16 +1195,16 @@ void VM_Version::get_processor_features() {
} else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
FLAG_SET_DEFAULT(UseAES, false);
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
// Use CLMUL instructions if available.


@ -676,6 +676,10 @@ protected:
// Space to save APX registers after signal handling
jlong apx_save[2]; // Save r16 and r31
// cpuid function 0xD, subleaf 19 (APX extended state)
uint32_t apx_xstate_size; // EAX: size of APX state (128)
uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
VM_Features feature_flags() const;
// Asserts
@ -739,6 +743,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check ymm register after signal handle
static int ymm_test_value() { return 0xCAFEBABE; }


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -38,7 +38,7 @@ class AIXDecoder: public AbstractDecoder {
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();


@ -703,7 +703,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, kernel thread id: %zu).",
os::current_thread_id(), (uintx) kernel_thread_id);
return 0;
return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ class fixed_strings {
public:
fixed_strings() : first(0) {}
fixed_strings() : first(nullptr) {}
~fixed_strings() {
node* n = first;
while (n) {
@ -113,7 +113,7 @@ bool AixSymbols::get_function_name (
// information (null if not available)
bool demangle // [in] whether to demangle the name
) {
struct tbtable* tb = 0;
struct tbtable* tb = nullptr;
unsigned int searchcount = 0;
// initialize output parameters
@ -653,10 +653,10 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
codeptr_t cur_iar = 0;
stackptr_t cur_sp = 0;
codeptr_t cur_rtoc = 0;
codeptr_t cur_lr = 0;
codeptr_t cur_iar = nullptr;
stackptr_t cur_sp = nullptr;
codeptr_t cur_rtoc = nullptr;
codeptr_t cur_lr = nullptr;
const ucontext_t* uc = (const ucontext_t*) context;
@ -926,7 +926,7 @@ static struct handletableentry* p_handletable = nullptr;
static const char* rtv_linkedin_libpath() {
constexpr int bufsize = 4096;
static char buffer[bufsize];
static const char* libpath = 0;
static const char* libpath = nullptr;
// we only try to retrieve the libpath once. After that try we
// let libpath point to buffer, which then contains a valid libpath


@ -28,6 +28,7 @@
//
// Declare Bsd specific flags. They are not available on other platforms.
//
#ifdef AARCH64
#define RUNTIME_OS_FLAGS(develop, \
develop_pd, \
product, \
@ -35,9 +36,21 @@
range, \
constraint) \
\
AARCH64_ONLY(develop(bool, AssertWXAtThreadSync, true, \
"Conservatively check W^X thread state at possible safepoint" \
"or handshake"))
develop(bool, TraceWXHealing, false, \
"track occurrences of W^X mode healing") \
develop(bool, UseOldWX, false, \
"Choose old W^X implementation.") \
product(bool, StressWXHealing, false, DIAGNOSTIC, \
"Stress W xor X healing on MacOS")
#else
#define RUNTIME_OS_FLAGS(develop, \
develop_pd, \
product, \
product_pd, \
range, \
constraint)
#endif
// end of RUNTIME_OS_FLAGS


@ -841,6 +841,7 @@ jlong os::javaTimeNanos() {
// We might also condition (c) on the magnitude of the delta between obsv and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
// https://web.archive.org/web/20131214182431/https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv;
}
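The idiom the comment refers to can be sketched standalone; a minimal illustration with std::atomic, assuming a single shared high-water mark (names hypothetical):

#include <atomic>
#include <cstdint>

static std::atomic<int64_t> g_max_hrtime{0};

// Never report a time below the largest value any thread has observed,
// and skip the CAS entirely when another thread already advanced it.
static int64_t monotonic_nanos(int64_t now) {
  int64_t obsv = g_max_hrtime.load(std::memory_order_relaxed);
  if (now <= obsv) {
    return obsv;                         // no CAS needed on this path
  }
  if (g_max_hrtime.compare_exchange_strong(obsv, now)) {
    return now;                          // we advanced the mark
  }
  return (now > obsv) ? now : obsv;      // obsv was refreshed on failure
}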


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}
////////////////////////////////////////////////////////////////////////////////


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}
inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
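A portable rendering of these cache hints, for comparison only, would use the GCC builtin; this sketch is illustrative and not what the file contains after the change:

// dcbt hints a read, dcbtst a write; __builtin_prefetch takes the
// address and a read/write flag.
static inline void prefetch_read(const void* loc, long interval) {
  __builtin_prefetch((const char*)loc + interval, /*rw=*/0);
}
static inline void prefetch_write(void* loc, long interval) {
  __builtin_prefetch((char*)loc + interval, /*rw=*/1);
}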


@ -54,8 +54,11 @@
#include "signals_posix.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/events.hpp"
#include "utilities/nativeStackPrinter.hpp"
#include "utilities/vmError.hpp"
#include "compiler/disassembler.hpp"
// put OS-includes here
# include <sys/types.h>
@ -85,6 +88,8 @@
#define SPELL_REG_SP "sp"
#ifdef __APPLE__
WXMode DefaultWXWriteMode;
// see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes
@ -233,19 +238,56 @@ NOINLINE frame os::current_frame() {
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {
// Enable WXWrite: this function is called by the signal handler at arbitrary
// point of execution.
ThreadWXEnable wx(WXWrite, thread);
// decide if this trap can be handled by a stub
address stub = nullptr;
address pc = nullptr;
address pc = nullptr;
//%note os_trap_1
if (info != nullptr && uc != nullptr && thread != nullptr) {
pc = (address) os::Posix::ucontext_get_pc(uc);
#ifdef MACOS_AARCH64
// If we got a SIGBUS because we tried to write into the code
// cache, try enabling WXWrite mode.
if (sig == SIGBUS
&& pc != info->si_addr
&& CodeCache::contains(info->si_addr)
&& os::address_is_in_vm(pc)) {
WXMode *entry_mode = thread->_cur_wx_mode;
if (entry_mode != nullptr && *entry_mode == WXArmedForWrite) {
if (TraceWXHealing) {
static const char *mode_names[3] = {"WXWrite", "WXExec", "WXArmedForWrite"};
tty->print("Healing WXMode %s at %p to WXWrite",
mode_names[*entry_mode], entry_mode);
char name[128];
int offset = 0;
if (os::dll_address_to_function_name(pc, name, sizeof name, &offset)) {
tty->print_cr(" (%s+0x%x)", name, offset);
} else {
tty->cr();
}
if (Verbose) {
char buf[O_BUFLEN];
NativeStackPrinter nsp(thread);
nsp.print_stack(tty, buf, sizeof(buf), pc,
true /* print_source_info */, -1 /* max stack */);
}
}
#ifndef PRODUCT
guarantee(StressWXHealing,
"We should not reach here unless StressWXHealing");
#endif
*(thread->_cur_wx_mode) = WXWrite;
return thread->wx_enable_write();
}
}
// Code that we call from the signal handler after this point may
// change the WX state, so we protect against that by saving and
// restoring the state.
ThreadWXEnable wx(thread->get_wx_state(), thread);
#endif
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
address addr = (address) info->si_addr;
@ -515,11 +557,42 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
#ifdef __APPLE__
#ifdef MACOS_AARCH64
THREAD_LOCAL bool os::_jit_exec_enabled;
// This is a wrapper around the standard library function
// pthread_jit_write_protect_np(3). We keep track of the state of
// per-thread write protection on the MAP_JIT region in the
// thread-local variable os::_jit_exec_enabled.
void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
bool exec_enabled = mode != WXWrite;
if (exec_enabled != _jit_exec_enabled NOT_PRODUCT( || DefaultWXWriteMode == WXWrite)) {
permit_forbidden_function::pthread_jit_write_protect_np(exec_enabled);
_jit_exec_enabled = exec_enabled;
}
}
#endif
// If the current thread is in the WX state WXArmedForWrite, change
// the state to WXWrite.
bool Thread::wx_enable_write() {
if (_wx_state == WXArmedForWrite) {
_wx_state = WXWrite;
os::current_thread_enable_wx(WXWrite);
return true;
} else {
return false;
}
}
// A wrapper around wx_enable_write() for when the current thread is
// not known.
void os::thread_wx_enable_write_impl() {
if (!StressWXHealing) {
Thread::current()->wx_enable_write();
}
}
#endif // MACOS_AARCH64
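The whole mechanism rests on the documented pthread_jit_write_protect_np(3) call. A minimal standalone sketch of the per-thread toggle, using an illustrative RAII guard rather than HotSpot's ThreadWXEnable:

#include <pthread.h>

// On macOS/AArch64 a MAP_JIT region is, per thread, either writable or
// executable, never both; flipping the mode is a cheap per-thread switch.
struct ScopedJitWritable {
  ScopedJitWritable()  { pthread_jit_write_protect_np(0); }  // writable
  ~ScopedJitWritable() { pthread_jit_write_protect_np(1); }  // executable again
};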
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;


@ -42,8 +42,19 @@ frame JavaThread::pd_last_frame() {
void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
_card_table_base = nullptr;
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
_card_table_base = nullptr;
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
_card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
_card_table_base = (address)ctbs->card_table_base_const();
} else {
_card_table_base = nullptr;
}


@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "runtime/vm_version.hpp"
// put OS-includes here
# include <sys/types.h>
@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
#define XSAVE_HDR_OFFSET 512
#define XFEATURE_APX (1ULL << 19)
// XSAVE header structure
// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
// Also: Linux kernel arch/x86/include/asm/fpu/types.h
struct xstate_header {
uint64_t xfeatures;
uint64_t xcomp_bv;
uint64_t reserved[6];
};
// APX extended state - R16-R31 (16 x 64-bit registers)
// See: Intel APX Architecture Specification
struct apx_state {
uint64_t regs[16]; // r16-r31
};
static apx_state* get_apx_state(const ucontext_t* uc) {
uint32_t offset = VM_Version::apx_xstate_offset();
if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
return nullptr;
}
char* xsave = (char*)uc->uc_mcontext.fpregs;
xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
// Check if APX state is present in this context
if (!(hdr->xfeatures & XFEATURE_APX)) {
return nullptr;
}
return (apx_state*)(xsave + offset);
}
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
// Dump APX EGPRs (R16-R31)
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
if (apx != nullptr) {
for (int i = 0; i < 16; i++) {
st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
if (i % 4 == 3) st->cr();
}
}
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
const int register_count = 16;
if (context == nullptr) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
if (context == nullptr || n == register_count) {
if (n == register_count) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
if (n < 16) {
// Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
# undef CASE_PRINT_REG
} else {
// APX extended general purpose registers (R16-R31)
st->print("R%d=", n);
print_location(st, apx->regs[n - 16]);
}
++n;
}
}


@ -98,6 +98,8 @@ CodeBuffer::CodeBuffer(const CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(
}
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// Always allow for empty slop around each section.
int slop = (int) CodeSection::end_slop();


@ -541,6 +541,7 @@ extern void vm_exit(int code);
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
MACOS_AARCH64_ONLY(current->wx_enable_write());
Handle exception(current, ex);
// This function is called when we are about to throw an exception. Therefore,


@ -0,0 +1,34 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/growableArray.hpp"
void AOTGrowableArrayHelper::deallocate(void* mem) {
if (!AOTMetaspace::in_aot_cache(mem)) {
GrowableArrayCHeapAllocator::deallocate(mem);
}
}


@ -0,0 +1,76 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
#define SHARE_AOT_AOTGROWABLEARRAY_HPP
#include <memory/metaspaceClosureType.hpp>
#include <utilities/growableArray.hpp>
class AOTGrowableArrayHelper {
public:
static void deallocate(void* mem);
};
// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
// MetaspaceClosure. This type should be used for growable arrays that need to be
// stored in the AOT cache. See ModuleEntry::_reads for an example.
template <typename E>
class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
friend class VMStructs;
friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;
static E* allocate(int max, MemTag mem_tag) {
return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
}
E* allocate() {
return allocate(this->_capacity, mtClass);
}
void deallocate(E* mem) {
#if INCLUDE_CDS
AOTGrowableArrayHelper::deallocate(mem);
#else
GrowableArrayCHeapAllocator::deallocate(mem);
#endif
}
public:
AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
GrowableArrayWithAllocator<E, AOTGrowableArray>(
allocate(initial_capacity, mem_tag),
initial_capacity) {}
AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
static bool is_read_only_by_default() { return false; }
};
#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
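A hedged usage sketch, assuming only the declarations above (the enclosing type, field, and element type are illustrative):

// Grows through the C-heap allocator like a plain GrowableArray, but
// can also be walked by MetaspaceClosure when written to the AOT cache.
struct Example {
  AOTGrowableArray<int> _ids;        // defaults to (0, mtClassShared)
  void record(int id) { _ids.append(id); }
};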


@ -0,0 +1,37 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#include "cds/aotGrowableArray.hpp"
#include "memory/metaspaceClosure.hpp"
template <typename E>
void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}
#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
info._type = ref->msotype();
info._type = ref->type();
_objs.append(info);
}
@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->msotype();
info._type = src_info->type();
objs.append(info);
}
@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
MetaspaceObj::Type type = info._type;
const char* type_name = MetaspaceObj::type_name(type);
MetaspaceClosureType type = info._type;
const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
case MetaspaceObj::ClassType:
case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolType:
case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstMethodType:
case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodType:
case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
case MetaspaceClosureType::ModuleEntryType:
log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::PackageEntryType:
log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::GrowableArrayType:
log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::KlassTrainingDataType:
case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodTrainingDataType:
case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::CompileTrainingDataType:
case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
mod->name_as_C_string());
}
void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
pkg->module()->name_as_C_string(), pkg->name_as_C_string());
}
void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
arr->length(), arr->capacity());
}
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
class GrowableArrayBase;
class KlassTrainingData;
class MethodCounters;
class MethodTrainingData;
class ModuleEntry;
class outputStream;
class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
MetaspaceObj::Type _type;
MetaspaceClosureType _type;
};
public:
@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,10 +360,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
if (!CDSConfig::is_using_archive()) {
assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@ -466,7 +464,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
StringTable::load_shared_strings_array();
if (CDSConfig::is_dumping_final_static_archive()) {
StringTable::move_shared_strings_into_runtime_table();
}
}
}
@ -619,7 +619,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;


@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_roots(it);
}
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::remove_unshareable_info();
}
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@ -1097,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
assert(CDSConfig::allow_only_single_java_thread(), "Required");
if (!CDSConfig::is_dumping_preimage_static_archive()) {
// A single thread is required for Reference handling and a deterministic CDS archive.
// It's not required for dumping the preimage, where References won't be archived and
// determinism is not needed.
assert(CDSConfig::allow_only_single_java_thread(), "Required");
}
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@ -1135,6 +1147,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
@ -1154,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
if (HeapShared::is_writing_mapping_mode()) {
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
}
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
// We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
// is not running. Otherwise the checks for next/discovered may not work.
precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
// The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
// is not running.
//
// This code is called on every object found by AOTArtifactFinder. When dumping the
// preimage archive, AOTArtifactFinder should not find any Reference objects.
precond(!CDSConfig::is_dumping_preimage_static_archive());
precond(CDSConfig::allow_only_single_java_thread());
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);

View File

@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
} else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
if (ref->msotype() == MetaspaceObj::MethodType) {
if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
if (ref->msotype() == MetaspaceObj::MethodDataType) {
if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
} else if (ref->msotype() == MetaspaceObj::MethodDataType ||
ref->msotype() == MetaspaceObj::MethodCountersType ||
ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
} else if (ref->type() == MetaspaceClosureType::MethodDataType ||
ref->type() == MetaspaceClosureType::MethodCountersType ||
ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
} else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
} else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@ -620,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = rw_region()->top();
ClassLoaderDataShared::allocate_archived_tables();
alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
}
#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@ -637,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
char* start = ro_region()->top();
ClassLoaderDataShared::init_archived_tables();
alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
}
#endif
RegeneratedClasses::record_regenerated_objects();
}
@ -663,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
if (src_info->msotype() == MetaspaceObj::ClassType) {
if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@ -679,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
} else if (src_info->msotype() == MetaspaceObj::SymbolType) {
} else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@ -689,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@ -704,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@ -714,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
_alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
_alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@ -997,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
info.msotype() == MetaspaceObj::MethodTrainingDataType ||
info.msotype() == MetaspaceObj::CompileTrainingDataType) {
if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
info.type() == MetaspaceClosureType::MethodTrainingDataType ||
info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodDataType) {
} else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodCountersType) {
} else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
MetaspaceObj::Type _msotype;
MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object inside the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* regenerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(regenerated_obj_info->_follow_mode),
_size_in_bytes(0), _msotype(regenerated_obj_info->_msotype),
_size_in_bytes(0), _type(regenerated_obj_info->_type),
_source_addr(src), _buffered_addr(regenerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
MetaspaceObj::Type msotype() const { return _msotype; }
MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -556,7 +556,9 @@ void CDSConfig::check_aotmode_record() {
// At VM exit, the module graph may be contaminated with program states.
// We will rebuild the module graph when dumping the CDS final image.
disable_heap_dumping();
_is_using_optimized_module_handling = false;
_is_using_full_module_graph = false;
_is_dumping_full_module_graph = false;
}
void CDSConfig::check_aotmode_create() {
@ -582,6 +584,7 @@ void CDSConfig::check_aotmode_create() {
substitute_aot_filename(FLAG_MEMBER_ENUM(AOTCache));
_is_dumping_final_static_archive = true;
_is_using_full_module_graph = false;
UseSharedSpaces = true;
RequireSharedSpaces = true;
@ -954,7 +957,9 @@ bool CDSConfig::are_vm_options_incompatible_with_dumping_heap() {
}
bool CDSConfig::is_dumping_heap() {
if (!(is_dumping_classic_static_archive() || is_dumping_final_static_archive())
// Note: when dumping the preimage static archive, only a very limited set of oops
// is dumped.
if (!is_dumping_static_archive()
|| are_vm_options_incompatible_with_dumping_heap()
|| _disable_heap_dumping) {
return false;
@ -966,6 +971,26 @@ bool CDSConfig::is_loading_heap() {
return HeapShared::is_archived_heap_in_use();
}
bool CDSConfig::is_dumping_klass_subgraphs() {
if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
// KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
// has been superseded by AOT class linking. This feature is used only when
// AOT class linking is disabled.
//
// KlassSubGraphs are disabled in the preimage static archive, which contains a very
// limited set of oops.
return is_dumping_heap() && !is_dumping_aot_linked_classes();
} else {
return false;
}
}
bool CDSConfig::is_using_klass_subgraphs() {
return (is_loading_heap() &&
!CDSConfig::is_using_aot_linked_classes() &&
!CDSConfig::is_dumping_final_static_archive());
}
bool CDSConfig::is_using_full_module_graph() {
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -188,6 +188,9 @@ public:
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_using_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,14 @@
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
#ifndef PRODUCT
// AOTGrowableArray has a vtable only in non-product builds (due to
// the virtual printing functions in AnyObj).
using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
f(GrowableArray_ModuleEntry_ptr) \
#endif
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
f(CompileTrainingData)
f(CompileTrainingData) \
NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
class CppVtableInfo {
intptr_t _vtable_size;
@ -86,7 +102,7 @@ public:
}
};
static inline intptr_t* vtable_of(const Metadata* m) {
static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}
@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
ResourceMark rm;
T tmp; // Allocate a temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
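
// A minimal standalone sketch (not HotSpot code) of the vtable-cloning trick used
// above: construct a temporary object of the polymorphic type and read its hidden
// vptr slot. The Base type and main() below are hypothetical, for illustration
// only; like HotSpot, the sketch assumes the common C++ ABI where the vptr is the
// first word of the object.
#include <cstdint>
#include <cstdio>

struct Base {
  virtual ~Base() {}
  virtual void f() {}
};

static inline intptr_t* vtable_of_sketch(const void* obj) {
  return *((intptr_t**)obj); // first word of a polymorphic object is the vptr
}

int main() {
  Base tmp; // dummy instance, used only to reach the class's shared vtable
  std::printf("vtable at %p\n", (void*)vtable_of_sketch(&tmp));
  return 0;
}
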
@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
switch (msotype) {
case MetaspaceObj::SymbolType:
case MetaspaceObj::TypeArrayU1Type:
case MetaspaceObj::TypeArrayU2Type:
case MetaspaceObj::TypeArrayU4Type:
case MetaspaceObj::TypeArrayU8Type:
case MetaspaceObj::TypeArrayOtherType:
case MetaspaceObj::ConstMethodType:
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceObj::AnnotationsType:
case MetaspaceObj::RecordComponentType:
case MetaspaceObj::AdapterHandlerEntryType:
case MetaspaceObj::AdapterFingerPrintType:
switch (type) {
case MetaspaceClosureType::SymbolType:
case MetaspaceClosureType::TypeArrayU1Type:
case MetaspaceClosureType::TypeArrayU2Type:
case MetaspaceClosureType::TypeArrayU4Type:
case MetaspaceClosureType::TypeArrayU8Type:
case MetaspaceClosureType::TypeArrayOtherType:
case MetaspaceClosureType::CArrayType:
case MetaspaceClosureType::ConstMethodType:
case MetaspaceClosureType::ConstantPoolCacheType:
case MetaspaceClosureType::AnnotationsType:
case MetaspaceClosureType::ModuleEntryType:
case MetaspaceClosureType::PackageEntryType:
case MetaspaceClosureType::RecordComponentType:
case MetaspaceClosureType::AdapterHandlerEntryType:
case MetaspaceClosureType::AdapterFingerPrintType:
PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,32 +27,34 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
// Here's a poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
METASPACE_OBJ_TYPES_DO(f) \
#define DUMPED_OBJ_TYPES_DO(f) \
METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
f(ModulesNatives) \
f(CppVTables) \
f(Other)
#define DUMPED_TYPE_DECLARE(name) name ## Type,
#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
enum Type {
// Types are ClassType, SymbolType, etc, as generated from the lists above
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
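
// A self-contained sketch of the X-macro pattern ("poor man's enum inheritance")
// used by DUMPED_OBJ_TYPES_DO above: a single list macro generates both the enum
// values and the name-lookup switch, so the two can never drift apart. The type
// names here are hypothetical.
#define SKETCH_TYPES_DO(f) \
  f(Class)                 \
  f(Symbol)                \
  f(Other)

#define SKETCH_TYPE_DECLARE(name) name##Type,
#define SKETCH_TYPE_NAME_CASE(name) case name##Type: return #name;

enum SketchType {
  SKETCH_TYPES_DO(SKETCH_TYPE_DECLARE)
  _sketch_number_of_types
};

static const char* sketch_type_name(SketchType t) {
  switch (t) {
    SKETCH_TYPES_DO(SKETCH_TYPE_NAME_CASE)
    default: return nullptr;
  }
}
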
@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
void record(MetaspaceClosureType type, int byte_size, bool read_only) {
int t = (int)type;
assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
_counts[which][type] ++;
_bytes [which][type] += byte_size;
}
void record_modules(int byte_size, bool read_only) {
int which = (read_only) ? RO : RW;
_bytes [which][ModulesNativesType] += byte_size;
_counts[which][t] ++;
_bytes [which][t] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -210,7 +210,7 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
@ -413,6 +413,8 @@ void HeapShared::materialize_thread_object() {
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
@ -453,7 +455,6 @@ int HeapShared::append_root(oop obj) {
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(is_archived_heap_in_use(), "cannot get roots when the archived heap is not in use");
oop result;
@ -598,8 +599,7 @@ public:
void set_oop(MetaspaceObj* ptr, oop o) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle handle(Universe::vm_global(), o);
bool is_new = put(ptr, handle);
assert(is_new, "cannot set twice");
put_when_absent(ptr, handle);
}
void remove_oop(MetaspaceObj* ptr) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
@ -612,6 +612,11 @@ public:
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
// We are in an AOT training run. The class has been redefined and we are giving it a new resolved_references array.
// Ignore it, as this class will be excluded from the AOT config.
return;
}
if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
_scratch_objects_table->set_oop(src, dest);
}
@ -831,14 +836,6 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
return nullptr;
}
void HeapShared::archive_strings() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
oop shared_strings_array = StringTable::init_shared_strings_array();
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
int HeapShared::archive_exception_instance(oop exception) {
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
assert(success, "sanity");
@ -890,7 +887,7 @@ void HeapShared::start_scanning_for_oops() {
void HeapShared::end_scanning_for_oops() {
if (is_writing_mapping_mode()) {
archive_strings();
StringTable::init_shared_table();
}
delete_seen_objects_table();
}
@ -940,7 +937,7 @@ void HeapShared::scan_java_class(Klass* orig_k) {
void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
@ -948,10 +945,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
if (CDSConfig::is_dumping_full_module_graph()) {
Modules::verify_archived_modules();
}
}
//
@ -1302,10 +1295,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (!is_archived_heap_in_use()) {
return; // nothing to do
}
if (!CDSConfig::is_using_aot_linked_classes()) {
if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
@ -1395,7 +1385,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
if (!is_archived_heap_in_use()) {
if (!CDSConfig::is_using_klass_subgraphs()) {
return; // nothing to do
}
@ -1871,7 +1861,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -1922,7 +1912,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
};
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -2148,7 +2138,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(CDSConfig::is_dumping_heap(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -478,7 +478,6 @@ private:
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
static void archive_strings();
static void archive_subgraphs();
static void copy_java_mirror(oop orig_mirror, oop scratch_m);

View File

@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->trust_final_fields()) {
// Explicit opt-in from system classes
return true;
}
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
return true;
}
return TrustFinalNonStaticFields;
}
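
// A condensed sketch (hypothetical Holder type, not the ci* classes) of the
// decision order above: the explicit jdk.internal.vm.annotation.TrustFinalFields
// opt-in wins first, then the package and record heuristics, and the global
// TrustFinalNonStaticFields flag is only the fallback.
struct HolderSketch {
  bool trust_final_fields;  // set from the @TrustFinalFields class annotation
  bool in_trusted_package;  // e.g. java/lang/invoke, jdk/internal/reflect, ...
  bool is_record;
};

static bool trust_final_non_static_sketch(const HolderSketch* holder, bool global_flag) {
  if (holder == nullptr) return false;
  if (holder->trust_final_fields) return true; // explicit opt-in from system classes
  if (holder->in_trusted_package) return true;
  if (holder->is_record) return true;
  return global_flag;
}
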

View File

@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
_trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily

View File

@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@ -207,6 +208,10 @@ public:
return _is_record;
}
bool trust_final_fields() const {
return _trust_final_fields;
}
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);

View File

@ -42,10 +42,7 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
CardTable::CardValue* ci_card_table_address() {
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");
return ct->byte_map_base();
CardTable::CardValue* ci_card_table_address_const() {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
return ctbs->card_table_base_const();
}

View File

@ -51,9 +51,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
CardTable::CardValue* ci_card_table_address();
CardTable::CardValue* ci_card_table_address_const();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address());
return reinterpret_cast<T>(ci_card_table_address_const());
}
#endif // SHARE_CI_CIUTILITIES_HPP

View File

@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
_jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
if (_location != _in_class) break; // only allow for classes
if (!privileged) break; // only allow in privileged code
return _jdk_internal_vm_annotation_TrustFinalFields;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
ik->set_trust_final_fields(true);
}
}
#define MAX_ARGS_SIZE 255

View File

@ -127,6 +127,7 @@ PerfCounter* ClassLoader::_perf_ik_link_methods_count = nullptr;
PerfCounter* ClassLoader::_perf_method_adapters_count = nullptr;
PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = nullptr;
PerfCounter* ClassLoader::_perf_secondary_hash_time = nullptr;
PerfCounter* ClassLoader::_perf_change_wx_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_indy_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_invokehandle_time = nullptr;
@ -1370,6 +1371,7 @@ void ClassLoader::initialize(TRAPS) {
NEWPERFBYTECOUNTER(_perf_sys_classfile_bytes_read, SUN_CLS, "sysClassBytes");
NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS, "unsafeDefineClassCalls");
NEWPERFTICKCOUNTER(_perf_secondary_hash_time, SUN_CLS, "secondarySuperHashTime");
NEWPERFTICKCOUNTER(_perf_change_wx_time, SUN_CLS, "changeWXTime");
if (log_is_enabled(Info, perf, class, link)) {
NEWPERFTICKCOUNTER(_perf_ik_link_methods_time, SUN_CLS, "linkMethodsTime");
@ -1418,6 +1420,10 @@ char* ClassLoader::lookup_vm_options() {
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
JImage_file =(*JImageOpen)(modules_path, &error);
if (JImage_file == nullptr) {
if (Arguments::has_jimage()) {
// The modules file exists but is unreadable or corrupt
vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
}
return nullptr;
}

View File

@ -184,6 +184,7 @@ class ClassLoader: AllStatic {
// Count the time taken to hash the secondary superclass arrays.
static PerfCounter* _perf_secondary_hash_time;
static PerfCounter* _perf_change_wx_time;
// The boot class path consists of 3 ordered pieces:
// 1. the module/path pairs specified to --patch-module
@ -268,6 +269,9 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_secondary_hash_time() {
return _perf_secondary_hash_time;
}
static PerfCounter* perf_change_wx_time() {
return _perf_change_wx_time;
}
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
static PerfCounter* perf_app_classload_selftime() { return _perf_app_classload_selftime; }
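
// A small sketch of the tick-counter pattern behind perf_change_wx_time(): a
// scoped timer accumulates elapsed time into a named counter when it goes out
// of scope. PerfCounterSketch is a stand-in, not the HotSpot PerfCounter class.
#include <chrono>

struct PerfCounterSketch { long long elapsed_ns = 0; };

class ScopedTickTimer {
  PerfCounterSketch& _counter;
  std::chrono::steady_clock::time_point _start;
 public:
  explicit ScopedTickTimer(PerfCounterSketch& c)
      : _counter(c), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTickTimer() {
    _counter.elapsed_ns += std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now() - _start).count();
  }
};
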

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
void allocate(ClassLoaderData* loader_data);
void init_archived_entries(ClassLoaderData* loader_data);
void iterate_roots(MetaspaceClosure* closure);
void build_tables(ClassLoaderData* loader_data, TRAPS);
void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->iterate_symbols(closure);
loader_data->modules() ->iterate_symbols(closure);
loader_data->unnamed_module()->iterate_symbols(closure);
}
it->push(&_packages);
it->push(&_modules);
it->push(&_unnamed_module);
}
void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
_packages = loader_data->packages()->allocate_archived_entries();
_modules = loader_data->modules() ->allocate_archived_entries();
_unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
_packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
_modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
_unnamed_module = loader_data->unnamed_module();
}
}
void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->init_archived_entries(_packages);
loader_data->modules() ->init_archived_entries(_modules);
_unnamed_module->init_as_archived_entry();
void ArchivedClassLoaderData::remove_unshareable_info() {
if (_packages != nullptr) {
_packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
for (int i = 0; i < _packages->length(); i++) {
_packages->at(i)->remove_unshareable_info();
}
}
if (_modules != nullptr) {
_modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
for (int i = 0; i < _modules->length(); i++) {
_modules->at(i)->remove_unshareable_info();
}
}
if (_unnamed_module != nullptr) {
_unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
_unnamed_module->remove_unshareable_info();
}
}
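
// A simplified sketch of the archiving strategy described above: because
// hashtable layout depends on Symbol addresses (randomized by ASLR), the
// archive stores the entries in a flat array and the hashtable is rebuilt from
// that array at runtime. The std:: containers stand in for the HotSpot tables.
#include <string>
#include <unordered_map>
#include <vector>

struct EntrySketch { std::string name; };

static std::unordered_map<std::string, EntrySketch*>
rebuild_table(const std::vector<EntrySketch*>& archived) {
  std::unordered_map<std::string, EntrySketch*> table;
  for (EntrySketch* e : archived) {
    table.emplace(e->name, e); // hashes recomputed with run-time values
  }
  return table;
}
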
@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
#endif
}
static ClassLoaderData* null_class_loader_data() {
@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
_archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
_archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
_archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
_archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
_archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}
void ClassLoaderDataShared::allocate_archived_tables() {
void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.allocate (null_class_loader_data());
_archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
_archived_system_loader_data.allocate (java_system_loader_data_or_null());
_archived_boot_loader_data.iterate_roots(it);
_archived_platform_loader_data.iterate_roots(it);
_archived_system_loader_data.iterate_roots(it);
}
void ClassLoaderDataShared::init_archived_tables() {
void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.remove_unshareable_info();
_archived_platform_loader_data.remove_unshareable_info();
_archived_system_loader_data.remove_unshareable_info();
_archived_boot_loader_data.init_archived_entries (null_class_loader_data());
_archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
_archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
_archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
_archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
static void allocate_archived_tables();
static void iterate_symbols(MetaspaceClosure* closure);
static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();

View File

@ -1263,6 +1263,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
"Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
}
if (CDSConfig::is_dumping_heap()) {
create_scratch_mirror(k, CHECK_(false));
}
return true;
}
#endif // INCLUDE_CDS_JAVA_HEAP
@ -1904,16 +1908,16 @@ oop java_lang_Thread::park_blocker(oop java_thread) {
return java_thread->obj_field_access<MO_RELAXED>(_park_blocker_offset);
}
// Obtain stack trace for platform or mounted virtual thread.
// If jthread is a virtual thread and it has been unmounted (or remounted to different carrier) the method returns null.
// The caller (java.lang.VirtualThread) handles returned nulls via retry.
// Obtain stack trace for a platform or virtual thread.
oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
ThreadsListHandle tlh(THREAD);
JavaThread* java_thread = nullptr;
oop thread_oop;
oop thread_oop = nullptr;
bool has_java_thread = tlh.cv_internal_thread_to_JavaThread(jthread, &java_thread, &thread_oop);
if (!has_java_thread) {
assert(thread_oop != nullptr, "Missing Thread oop");
bool is_virtual = java_lang_VirtualThread::is_instance(thread_oop);
if (!has_java_thread && !is_virtual) {
return nullptr;
}
@ -1921,12 +1925,11 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
public:
const Handle _thread_h;
int _depth;
bool _retry_handshake;
GrowableArray<Method*>* _methods;
GrowableArray<int>* _bcis;
GetStackTraceHandshakeClosure(Handle thread_h) :
HandshakeClosure("GetStackTraceHandshakeClosure"), _thread_h(thread_h), _depth(0), _retry_handshake(false),
HandshakeClosure("GetStackTraceHandshakeClosure"), _thread_h(thread_h), _depth(0),
_methods(nullptr), _bcis(nullptr) {
}
~GetStackTraceHandshakeClosure() {
@ -1934,37 +1937,15 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
delete _bcis;
}
bool read_reset_retry() {
bool ret = _retry_handshake;
// If we re-execute the handshake this method need to return false
// when the handshake cannot be performed. (E.g. thread terminating)
_retry_handshake = false;
return ret;
}
void do_thread(Thread* th) {
if (!Thread::current()->is_Java_thread()) {
_retry_handshake = true;
JavaThread* java_thread = th != nullptr ? JavaThread::cast(th) : nullptr;
if (java_thread != nullptr && !java_thread->has_last_Java_frame()) {
// stack trace is empty
return;
}
JavaThread* java_thread = JavaThread::cast(th);
if (!java_thread->has_last_Java_frame()) {
return;
}
bool carrier = false;
if (java_lang_VirtualThread::is_instance(_thread_h())) {
// Ensure _thread_h is still mounted to java_thread.
const ContinuationEntry* ce = java_thread->vthread_continuation();
if (ce == nullptr || ce->cont_oop(java_thread) != java_lang_VirtualThread::continuation(_thread_h())) {
// Target thread has been unmounted.
return;
}
} else {
carrier = (java_thread->vthread_continuation() != nullptr);
}
bool is_virtual = java_lang_VirtualThread::is_instance(_thread_h());
bool vthread_carrier = !is_virtual && (java_thread->vthread_continuation() != nullptr);
const int max_depth = MaxJavaStackTraceDepth;
const bool skip_hidden = !ShowHiddenFrames;
@ -1975,7 +1956,10 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
_bcis = new (mtInternal) GrowableArray<int>(init_length, mtInternal);
int total_count = 0;
for (vframeStream vfst(java_thread, false, false, carrier); // we don't process frames as we don't care about oops
vframeStream vfst(java_thread != nullptr
? vframeStream(java_thread, false, false, vthread_carrier) // we don't process frames as we don't care about oops
: vframeStream(java_lang_VirtualThread::continuation(_thread_h())));
for (;
!vfst.at_end() && (max_depth == 0 || max_depth != total_count);
vfst.next()) {
@ -1997,9 +1981,11 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
GetStackTraceHandshakeClosure gsthc(Handle(THREAD, thread_oop));
do {
Handshake::execute(&gsthc, &tlh, java_thread);
} while (gsthc.read_reset_retry());
if (is_virtual) {
Handshake::execute(&gsthc, thread_oop);
} else {
Handshake::execute(&gsthc, &tlh, java_thread);
}
// Stop if no stack trace is found.
if (gsthc._depth == 0) {
@ -2196,7 +2182,7 @@ void java_lang_VirtualThread::set_timeout(oop vthread, jlong value) {
JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state) {
JavaThreadStatus status = JavaThreadStatus::NEW;
switch (state & ~SUSPENDED) {
switch (state) {
case NEW:
status = JavaThreadStatus::NEW;
break;

View File

@ -592,9 +592,6 @@ class java_lang_VirtualThread : AllStatic {
TIMED_WAITING = 17,
TIMED_WAIT = 18, // waiting in timed-Object.wait
TERMINATED = 99,
// additional state bits
SUSPENDED = 1 << 8, // suspended when unmounted
};
static void compute_offsets();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "cds/aotClassLocation.hpp"
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@ -37,6 +38,7 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@ -44,7 +46,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}
@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
_is_patched(false)
DEBUG_ONLY(COMMA _reads_is_archived(false)) {
_is_patched(false) {
// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}
void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_reads);
it->push(&_version);
it->push(&_location);
}
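
// A minimal sketch of the visitor pattern behind metaspace_pointers_do above:
// each metadata object pushes the addresses of its pointer fields so that a
// single closure can relocate, record, or count them uniformly. The types here
// are hypothetical simplifications of MetaspaceClosure.
struct ClosureSketch {
  virtual ~ClosureSketch() {}
  virtual void push(void** slot) = 0;
};

struct CountNonNull : ClosureSketch {
  int count = 0;
  void push(void** slot) override {
    if (*slot != nullptr) count++;
  }
};

struct ModuleLikeSketch {
  void* _name;
  void* _version;
  void pointers_do(ClosureSketch* it) {
    it->push(&_name);    // mirrors it->push(&_name) above
    it->push(&_version);
  }
};
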
#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const ModuleEntry*,
ModuleEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedModuleEntries;
static ArchivedModuleEntries* _archive_modules_entries = nullptr;
#ifndef PRODUCT
static int _num_archived_module_entries = 0;
static int _num_inited_module_entries = 0;
#endif
bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}
ModuleEntry* ModuleEntry::allocate_archived_entry() const {
precond(should_be_archived());
precond(CDSConfig::is_dumping_full_module_graph());
ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
void ModuleEntry::remove_unshareable_info() {
_archived_module_index = HeapShared::append_root(module_oop());
archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
if (_archive_modules_entries == nullptr) {
_archive_modules_entries = new (mtClass)ArchivedModuleEntries();
}
assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
_archive_modules_entries->put(this, archived_entry);
DEBUG_ONLY(_num_archived_module_entries++);
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
archived_entry->_shared_pd = null_handle;
} else {
assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
if (_reads != nullptr) {
_reads->set_in_aot_cache();
}
// Clear handles and restore at run time. Handles cannot be archived.
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
_shared_pd = null_handle;
} else {
assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
}
OopHandle null_handle;
archived_entry->_module_handle = null_handle;
// For verify_archived_module_entries()
DEBUG_ONLY(_num_inited_module_entries++);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
archived_entry->print(&ls);
}
return archived_entry;
}
bool ModuleEntry::has_been_archived() {
assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
return _archive_modules_entries->contains(this);
}
ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
return *ptr;
}
// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
// Write it out as an Array, and convert it back to GrowableArray at runtime.
Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
Array<ModuleEntry*>* archived_array = nullptr;
int length = (array == nullptr) ? 0 : array->length();
if (length > 0) {
archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
}
}
return archived_array;
}
GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
GrowableArray<ModuleEntry*>* array = nullptr;
int length = (archived_array == nullptr) ? 0 : archived_array->length();
if (length > 0) {
array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = archived_array->at(i);
array->append(archived_entry);
}
}
return array;
}
void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
closure->push(&_version);
closure->push(&_location);
}
void ModuleEntry::init_as_archived_entry() {
set_archived_reads(write_growable_array(reads()));
_module_handle = null_handle;
_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
_name = ArchiveBuilder::get_buffered_symbol(_name);
ArchivePtrMarker::mark_pointer((address*)&_name);
Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
if (_version != nullptr) {
_version = ArchiveBuilder::get_buffered_symbol(_version);
}
if (_location != nullptr) {
_location = ArchiveBuilder::get_buffered_symbol(_location);
}
JFR_ONLY(set_trace_id(0);) // re-init at runtime
ArchivePtrMarker::mark_pointer((address*)&_reads);
ArchivePtrMarker::mark_pointer((address*)&_version);
ArchivePtrMarker::mark_pointer((address*)&_location);
}
#ifndef PRODUCT
void ModuleEntry::verify_archived_module_entries() {
assert(_num_archived_module_entries == _num_inited_module_entries,
"%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
_num_archived_module_entries, _num_inited_module_entries);
}
#endif // PRODUCT
void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}
@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}
void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
m->iterate_symbols(closure);
};
_table.iterate_all(syms);
}
Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
Array<ModuleEntry*>* aot_table =
MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
archived_modules->at_put(n++, m);
m->pack_reads();
aot_table->at_put(n++, m);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
m->print(&ls);
}
};
_table.iterate_all(grab);
if (n > 1) {
// Always allocate in the same order to produce a deterministic archive.
QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
QuickSort::sort(aot_table->data(), n, compare_module_by_name);
}
for (int i = 0; i < n; i++) {
archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
}
return archived_modules;
}
void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
for (int i = 0; i < archived_modules->length(); i++) {
ModuleEntry* archived_entry = archived_modules->at(i);
archived_entry->init_as_archived_entry();
}
return aot_table;
}
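
The QuickSort::sort call above exists because hash-table iteration order can differ from run to run; sorting entries by a stable key (the module name) before writing them out makes repeated dumps byte-for-byte reproducible. A standalone sketch of the same idea, with hypothetical Entry/make_deterministic names:

#include <algorithm>
#include <string>
#include <vector>

struct Entry { std::string name; };

// Impose a stable, input-independent order before emitting the table.
static void make_deterministic(std::vector<Entry*>& table) {
  std::sort(table.begin(), table.end(),
            [](const Entry* a, const Entry* b) { return a->name < b->name; });
}
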
void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP
#include "cds/aotGrowableArray.hpp"
#include "jni.h"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@ -68,11 +70,8 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
union {
GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
};
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@ -81,7 +80,6 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@ -120,22 +118,18 @@ public:
bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
GrowableArray<ModuleEntry*>* reads() const {
assert(!_reads_is_archived, "sanity");
AOTGrowableArray<ModuleEntry*>* reads() const {
return _reads;
}
void set_reads(GrowableArray<ModuleEntry*>* r) {
void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
_reads = r;
DEBUG_ONLY(_reads_is_archived = false);
}
Array<ModuleEntry*>* archived_reads() const {
assert(_reads_is_archived, "sanity");
return _archived_reads;
}
void set_archived_reads(Array<ModuleEntry*>* r) {
_archived_reads = r;
DEBUG_ONLY(_reads_is_archived = true);
void pack_reads() {
if (_reads != nullptr) {
_reads->shrink_to_fit();
}
}
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
@ -189,6 +183,13 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
static bool is_read_only_by_default() { return false; }
void print(outputStream* st = tty) const;
void verify();
@ -198,18 +199,11 @@ public:
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void iterate_symbols(MetaspaceClosure* closure);
ModuleEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
bool has_been_archived();
static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
void remove_unshareable_info();
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};
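
A hedged sketch of the simplification visible in this header: the old code kept a union of two representations plus a debug tag (_reads_is_archived) and asserted on every accessor; a single array type that is growable at dump time and frozen once packed removes the tag and the paired reads/archived_reads accessors. FreezableArray is a hypothetical stand-in, not the real AOTGrowableArray.

#include <cassert>
#include <vector>

template <typename T>
class FreezableArray {
  std::vector<T> _elems;
  bool _frozen = false;
 public:
  void append(const T& v) { assert(!_frozen); _elems.push_back(v); }
  void freeze() { _elems.shrink_to_fit(); _frozen = true; }   // cf. pack_reads()
  int length() const { return static_cast<int>(_elems.size()); }
  const T& at(int i) const { return _elems[static_cast<size_t>(i)]; }
};
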
@ -275,9 +269,7 @@ public:
void verify();
#if INCLUDE_CDS_JAVA_HEAP
void iterate_symbols(MetaspaceClosure* closure);
Array<ModuleEntry*>* allocate_archived_entries();
void init_archived_entries(Array<ModuleEntry*>* archived_modules);
Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");
if (orig_module_ent->name() != nullptr) {
// For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
assert(orig_module_ent->has_been_archived(), "sanity");
} else {
precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
if (orig_module_ent->name() == nullptr) {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
precond(orig_module_ent->has_been_archived());
if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@ -529,10 +526,6 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}
void Modules::verify_archived_modules() {
ModuleEntry::verify_archived_module_entries();
}
class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,6 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
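
The PRODUCT_RETURN and NOT_CDS_JAVA_HEAP_RETURN suffixes seen in these headers follow a conditional-stub idiom: when the feature is compiled out, the macro expands to an empty inline body, so call sites need no #ifdef guards. A simplified, self-contained sketch with a hypothetical FEATURE_RETURN macro:

#define INCLUDE_FEATURE 0   // flip to 1 and supply a definition in a .cpp file

#if INCLUDE_FEATURE
#define FEATURE_RETURN      // declaration only; real body lives elsewhere
#else
#define FEATURE_RETURN {}   // feature compiled out: empty inline stub
#endif

struct Modules {
  static void verify_archived_modules() FEATURE_RETURN;
};

int main() {
  Modules::verify_archived_modules();   // compiles either way, no #ifdef here
  return 0;
}
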

Some files were not shown because too many files have changed in this diff.