Merge branch 'master' into JDK-8326205

This commit is contained in:
Evgeny Astigeevich 2026-02-08 08:34:12 +00:00
commit faf2d312b6
883 changed files with 33473 additions and 21359 deletions

View File

@ -1,7 +1,7 @@
#!/bin/bash -f
#
# Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,13 @@
# questions.
#
# Script to update the Copyright YEAR range in Mercurial & Git sources.
# Script to update the Copyright YEAR range in Git sources.
# (Originally from xdono, Thanks!)
# To update Copyright years for changes in a specific branch,
# you use a command along these lines:
# $ git diff upstream/master...<branch-name> | lsdiff | cut -d '/' -f 2- | bash bin/update_copyright_year.sh -m -
#------------------------------------------------------------
copyright="Copyright"
copyright_symbol="(c)"
@ -47,7 +51,7 @@ rm -f -r ${tmp}
mkdir -p ${tmp}
total=0
usage="Usage: `basename "$0"` [-c company] [-y year] [-h|f]"
usage="Usage: `basename "$0"` [-c company] [-y year] [-m file] [-h|f]"
Help()
{
# Display Help
@ -65,15 +69,18 @@ Help()
echo "-b Specifies the base reference for change set lookup."
echo "-f Updates the copyright for all change sets in a given year,"
echo " as specified by -y. Overrides -b flag."
echo "-m Read the list of modified files from the given file,"
echo " use - to read from stdin"
echo "-h Print this help."
echo
}
full_year=false
base_reference=master
modified_files_origin="";
# Process options
while getopts "b:c:fhy:" option; do
while getopts "b:c:fhm:y:" option; do
case $option in
b) # supplied base reference
base_reference=${OPTARG}
@ -91,6 +98,9 @@ while getopts "b:c:fhy:" option; do
y) # supplied company year
year=${OPTARG}
;;
m) # modified files will be read from the given origin
modified_files_origin="${OPTARG}"
;;
\?) # illegal option
echo "$usage"
exit 1
@ -110,18 +120,10 @@ git status &> /dev/null && git_found=true
if [ "$git_found" != "true" ]; then
echo "Error: Please execute script from within a JDK git repository."
exit 1
else
echo "Using Git version control system"
vcs_status=(git ls-files -m)
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
fi
echo "Using Git version control system"
# Return true if it makes sense to edit this file
saneFileToCheck()
{
@ -168,6 +170,25 @@ updateFile() # file
echo "${changed}"
}
# Update the copyright year on files sent in stdin
updateFiles() # stdin: list of files to update
{
count=0
fcount=0
while read i; do
fcount=`expr ${fcount} '+' 1`
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
}
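# Illustrative examples (not in the original script) of driving the new
# -m option, reading the file list either from a file or from stdin:
#   bash bin/update_copyright_year.sh -m modified_files.txt
#   git diff --name-only upstream/master...HEAD | bash bin/update_copyright_year.sh -m -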
# Update the copyright year on all files changed by this changeset
updateChangesetFiles() # changeset
{
@ -178,18 +199,7 @@ updateChangesetFiles() # changeset
| ${awk} -F' ' '{for(i=1;i<=NF;i++)print $i}' \
> ${files}
if [ -f "${files}" -a -s "${files}" ] ; then
fcount=`cat ${files}| wc -l`
for i in `cat ${files}` ; do
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
cat ${files} | updateFiles
else
printf " ERROR: No files changed in the changeset? Must be a mistake.\n"
set -x
@ -204,67 +214,80 @@ updateChangesetFiles() # changeset
}
# Check if repository is clean
vcs_status=(git ls-files -m)
previous=`"${vcs_status[@]}"|wc -l`
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contains previously edited working set files."
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
# Get all changesets this year
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
if [ "x$modified_files_origin" != "x" ]; then
cat $modified_files_origin | updateFiles
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
# Get all changesets this year
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
fi
# Cleanup

View File

@ -1385,10 +1385,9 @@ dpkg-deb -x /tmp/libasound2-dev_1.0.25-4_armhf.deb .</code></pre></li>
can specify it by <code>--with-alsa</code>.</p></li>
</ul>
<h4 id="x11-1">X11</h4>
<p>You will need X11 libraries suitable for your <em>target</em> system.
In most cases, using Debian's pre-built libraries works fine.</p>
<p>Note that X11 is needed even if you only want to build a headless
JDK.</p>
<p>When not building a headless JDK, you will need X11 libraries
suitable for your <em>target</em> system. In most cases, using Debian's
pre-built libraries works fine.</p>
<ul>
<li><p>Go to <a href="https://www.debian.org/distrib/packages">Debian
Package Search</a>, search for the following packages for your

View File

@ -1178,10 +1178,8 @@ Note that alsa is needed even if you only want to build a headless JDK.
#### X11
You will need X11 libraries suitable for your *target* system. In most cases,
using Debian's pre-built libraries works fine.
Note that X11 is needed even if you only want to build a headless JDK.
When not building a headless JDK, you will need X11 libraries suitable for your
*target* system. In most cases, using Debian's pre-built libraries works fine.
* Go to [Debian Package Search](https://www.debian.org/distrib/packages),
search for the following packages for your *target* system, and download them

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -972,6 +972,10 @@ define SetupRunJtregTestBody
JTREG_AUTO_PROBLEM_LISTS += ProblemList-enable-preview.txt
endif
ifneq ($$(findstring -XX:+UseCompactObjectHeaders, $$(JTREG_ALL_OPTIONS)), )
JTREG_AUTO_PROBLEM_LISTS += ProblemList-coh.txt
endif
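# Illustrative (assumed) invocation that exercises this auto problem list:
#   make test TEST=tier1 JTREG="VM_OPTIONS=-XX:+UseCompactObjectHeaders"
# The flag then shows up in JTREG_ALL_OPTIONS, pulling in ProblemList-coh.txt.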
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
# Accept both absolute paths as well as relative to the current test root.

View File

@ -42,12 +42,12 @@ m4_include([lib-tests.m4])
AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES],
[
# Check if X11 is needed
if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
# No X11 support on windows or macosx
if test "x$OPENJDK_TARGET_OS" = xwindows ||
test "x$OPENJDK_TARGET_OS" = xmacosx ||
test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
NEEDS_LIB_X11=false
else
# All other instances need X11, even if building headless only, libawt still
# needs X11 headers.
# All other instances need X11 for libawt.
NEEDS_LIB_X11=true
fi
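# Illustrative consequence (assumption): a build configured with
#   bash configure --enable-headless-only
# now takes the NEEDS_LIB_X11=false branch, so X11 is no longer a
# build dependency for headless-only builds.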

View File

@ -88,6 +88,10 @@ LIBAWT_EXTRA_HEADER_DIRS := \
LIBAWT_CFLAGS := -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES -DMLIB_NO_LIBSUNMATH
ifeq ($(ENABLE_HEADLESS_ONLY), true)
LIBAWT_CFLAGS += -DHEADLESS
endif
ifeq ($(call isTargetOs, windows), true)
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE -DMLIB_OS64BIT
LIBAWT_RCFLAGS ?= -I$(TOPDIR)/src/java.base/windows/native/launcher/icons
@ -167,11 +171,18 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
LIBAWT_HEADLESS_EXCLUDE_FILES := \
GLXGraphicsConfig.c \
GLXSurfaceData.c \
X11PMBlitLoops.c \
X11Renderer.c \
X11SurfaceData.c \
#
LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
$(LIBAWT_DEFAULT_HEADER_DIRS) \
common/awt/debug \
common/font \
common/java2d/opengl \
java.base:libjvm \
#
@ -191,7 +202,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
EXCLUDES := medialib, \
EXCLUDES := medialib opengl, \
EXCLUDE_FILES := $(LIBAWT_HEADLESS_EXCLUDE_FILES), \
ONLY_EXPORTED := $(LIBAWT_HEADLESS_ONLY_EXPORTED), \
OPTIMIZATION := LOW, \
CFLAGS := -DHEADLESS=true $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ $(eval $(call SetupJdkExecutable, BUILD_JPACKAGEAPPLAUNCHER, \
SRC := applauncher, \
EXTRA_SRC := common, \
INCLUDE_FILES := $(JPACKAGEAPPLAUNCHER_INCLUDE_FILES), \
OPTIMIZATION := LOW, \
OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_LinuxPackage.c := format-nonliteral, \
DISABLED_WARNINGS_clang_Log.cpp := unused-const-variable, \
@ -91,7 +91,7 @@ ifeq ($(call isTargetOs, linux), true)
common, \
EXCLUDE_FILES := LinuxLauncher.c LinuxPackage.c, \
LINK_TYPE := C++, \
OPTIMIZATION := LOW, \
OPTIMIZATION := SIZE, \
DISABLED_WARNINGS_gcc_Log.cpp := unused-const-variable, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_tstrings.cpp := format-nonliteral, \

View File

@ -1,264 +0,0 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This source code is provided to illustrate the usage of a given feature
* or technique and has been deliberately simplified. Additional steps
* required for a production-quality application, such as security checks,
* input validation and proper error handling, might not be present in
* this sample code.
*/
import java.util.EventObject;
import java.util.List;
import javax.swing.JTable;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableCellEditor;
import javax.swing.table.TableCellRenderer;
import javax.swing.table.TableColumn;
/**
* The OldJTable is an unsupported class containing some methods that were
* deleted from the JTable between releases 0.6 and 0.7
*/
@SuppressWarnings("serial")
public class OldJTable extends JTable
{
/*
* A new convenience method returning the index of the column in the
* co-ordinate space of the view.
*/
public int getColumnIndex(Object identifier) {
return getColumnModel().getColumnIndex(identifier);
}
//
// Methods deleted from the JTable because they only work with the
// DefaultTableModel.
//
public TableColumn addColumn(Object columnIdentifier, int width) {
return addColumn(columnIdentifier, width, null, null, null);
}
public TableColumn addColumn(Object columnIdentifier, List<?> columnData) {
return addColumn(columnIdentifier, -1, null, null, columnData);
}
// Override the new JTable implementation - it will not add a column to the
// DefaultTableModel.
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
return addColumn(columnIdentifier, width, renderer, editor, null);
}
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor, List<?> columnData) {
checkDefaultTableModel();
// Set up the model side first
DefaultTableModel m = (DefaultTableModel)getModel();
m.addColumn(columnIdentifier, columnData.toArray());
// The column will have been added to the end, so the index of the
// column in the model is the last element.
TableColumn newColumn = new TableColumn(
m.getColumnCount()-1, width, renderer, editor);
super.addColumn(newColumn);
return newColumn;
}
// Not possible to make this work the same way ... change it so that
// it does not delete columns from the model.
public void removeColumn(Object columnIdentifier) {
super.removeColumn(getColumn(columnIdentifier));
}
public void addRow(Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData);
}
public void addRow(List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData.toArray());
}
public void removeRow(int rowIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).removeRow(rowIndex);
}
public void moveRow(int startIndex, int endIndex, int toIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
}
public void insertRow(int rowIndex, Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
}
public void insertRow(int rowIndex, List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
}
public void setNumRows(int newSize) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setNumRows(newSize);
}
public void setDataVector(Object[][] newData, List<?> columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(
newData, columnIds.toArray());
}
public void setDataVector(Object[][] newData, Object[] columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
}
protected void checkDefaultTableModel() {
if(!(dataModel instanceof DefaultTableModel))
throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
}
//
// Methods removed from JTable in the move from identifiers to ints.
//
public Object getValueAt(Object columnIdentifier, int rowIndex) {
return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
}
public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
}
public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
}
public boolean editColumnRow(Object identifier, int row) {
return super.editCellAt(row, getColumnIndex(identifier));
}
public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
moveColumn(getColumnIndex(columnIdentifier),
getColumnIndex(targetColumnIdentifier));
}
public boolean isColumnSelected(Object identifier) {
return isColumnSelected(getColumnIndex(identifier));
}
public TableColumn addColumn(int modelColumn, int width) {
return addColumn(modelColumn, width, null, null);
}
public TableColumn addColumn(int modelColumn) {
return addColumn(modelColumn, 75, null, null);
}
/**
* Creates a new column with <I>modelColumn</I>, <I>width</I>,
* <I>renderer</I>, and <I>editor</I> and adds it to the end of
* the JTable's array of columns. This method also retrieves the
* name of the column using the model's <I>getColumnName(modelColumn)</I>
* method, and sets both the header value and the identifier
* for this TableColumn accordingly.
* <p>
* The <I>modelColumn</I> is the index of the column in the model which
* will supply the data for this column in the table. This, like the
* <I>columnIdentifier</I> in previous releases, does not change as the
* columns are moved in the view.
* <p>
* For the rest of the JTable API, and all of its associated classes,
* columns are referred to in the co-ordinate system of the view, the
* index of the column in the model is kept inside the TableColumn
* and is used only to retrieve the information from the appropriate
* column in the model.
* <p>
*
* @param modelColumn The index of the column in the model
* @param width The new column's width. Or -1 to use
* the default width
* @param renderer The renderer used with the new column.
* Or null to use the default renderer.
* @param editor The editor used with the new column.
* Or null to use the default editor.
*/
public TableColumn addColumn(int modelColumn, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
TableColumn newColumn = new TableColumn(
modelColumn, width, renderer, editor);
addColumn(newColumn);
return newColumn;
}
//
// Methods that had their arguments switched.
//
// These won't work with the new table package.
/*
public Object getValueAt(int columnIndex, int rowIndex) {
return super.getValueAt(rowIndex, columnIndex);
}
public boolean isCellEditable(int columnIndex, int rowIndex) {
return super.isCellEditable(rowIndex, columnIndex);
}
public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
super.setValueAt(aValue, rowIndex, columnIndex);
}
*/
public boolean editColumnRow(int columnIndex, int rowIndex) {
return super.editCellAt(rowIndex, columnIndex);
}
public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
return super.editCellAt(rowIndex, columnIndex, e);
}
} // End Of Class OldJTable

View File

@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -201,6 +201,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@ -383,6 +385,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@ -4218,6 +4222,224 @@ instruct reduce_minD_masked(vRegD dst, vRegD dsrc, vReg vsrc, pRegGov pg) %{
ins_pipe(pipe_slow);
%}
// -------------------- Vector reduction unsigned min/max ----------------------
// reduction uminI
instruct reduce_uminI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vReg tmp, rFlagsReg cr) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction uminL
instruct reduce_uminL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, KILL cr);
format %{ "reduce_uminL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
ins_encode %{
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
/* vector_length_in_bytes */ 16, fnoreg);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMinReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umin - predicated
instruct reduce_uminI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 &&
(Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_uminL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
match(Set dst (UMinReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_uminL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umaxI
instruct reduce_umaxI_neon(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vReg tmp, rFlagsReg cr) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_neon $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxI_sve(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) &&
(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(2)) == T_INT));
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umaxL
instruct reduce_umaxL_neon(iRegLNoSp dst, iRegL isrc, vReg vsrc, rFlagsReg cr) %{
predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, KILL cr);
format %{ "reduce_umaxL_neon $dst, $isrc, $vsrc\t# 2L. KILL cr" %}
ins_encode %{
__ neon_reduce_minmax_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
/* vector_length_in_bytes */ 16, fnoreg);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxL_sve(iRegLNoSp dst, iRegL isrc, vReg vsrc,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(2)) == T_LONG);
match(Set dst (UMaxReductionV isrc vsrc));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxL_sve $dst, $isrc, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, T_LONG,
$isrc$$Register, $vsrc$$FloatRegister,
ptrue, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// reduction umax - predicated
instruct reduce_umaxI_masked(iRegINoSp dst, iRegIorL2I isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 &&
(Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_BYTE ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_SHORT ||
Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_INT));
match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxI_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct reduce_umaxL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg,
vRegD tmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n->in(1)->in(2)) == T_LONG);
match(Set dst (UMaxReductionV (Binary isrc vsrc) pg));
effect(TEMP_DEF dst, TEMP tmp, KILL cr);
format %{ "reduce_umaxL_masked $dst, $isrc, $pg, $vsrc\t# KILL $tmp, cr" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ sve_reduce_integral(this->ideal_Opcode(), $dst$$Register, bt,
$isrc$$Register, $vsrc$$FloatRegister,
$pg$$PRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -191,6 +191,8 @@ source %{
case Op_XorReductionV:
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// Reductions with less than 8 bytes vector length are
// not supported.
if (length_in_bytes < 8) {
@ -373,6 +375,8 @@ source %{
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
@ -2505,6 +2509,32 @@ REDUCE_MAXMIN_INT_PREDICATE(min, L, iRegL, MinReductionV)
REDUCE_MAXMIN_FP_PREDICATE(min, F, fsrc, MinReductionV, sve_fminv, fmins)
REDUCE_MAXMIN_FP_PREDICATE(min, D, dsrc, MinReductionV, sve_fminv, fmind)
// -------------------- Vector reduction unsigned min/max ----------------------
// reduction uminI
REDUCE_MAXMIN_I_NEON(umin, UMinReductionV)
REDUCE_MAXMIN_I_SVE(umin, UMinReductionV)
// reduction uminL
REDUCE_MAXMIN_L_NEON(umin, UMinReductionV)
REDUCE_MAXMIN_L_SVE(umin, UMinReductionV)
// reduction umin - predicated
REDUCE_MAXMIN_INT_PREDICATE(umin, I, iRegIorL2I, UMinReductionV)
REDUCE_MAXMIN_INT_PREDICATE(umin, L, iRegL, UMinReductionV)
// reduction umaxI
REDUCE_MAXMIN_I_NEON(umax, UMaxReductionV)
REDUCE_MAXMIN_I_SVE(umax, UMaxReductionV)
// reduction umaxL
REDUCE_MAXMIN_L_NEON(umax, UMaxReductionV)
REDUCE_MAXMIN_L_SVE(umax, UMaxReductionV)
// reduction umax - predicated
REDUCE_MAXMIN_INT_PREDICATE(umax, I, iRegIorL2I, UMaxReductionV)
REDUCE_MAXMIN_INT_PREDICATE(umax, L, iRegL, UMaxReductionV)
// ------------------------------ Vector reinterpret ---------------------------
instruct reinterpret_same_size(vReg dst_src) %{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2658,6 +2658,8 @@ template<typename R, typename... Rx>
INSN(uminv, 1, 0b011011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(smaxp, 0, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sminp, 0, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(umaxp, 1, 0b101001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(uminp, 1, 0b101011, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(sqdmulh,0, 0b101101, false); // accepted arrangements: T4H, T8H, T2S, T4S
INSN(shsubv, 0, 0b001001, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
@ -3490,7 +3492,9 @@ public:
INSN(sve_sub, 0b00000100, 0b000001000); // vector sub
INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
INSN(sve_umax, 0b00000100, 0b001001000); // unsigned maximum vectors
INSN(sve_umaxv, 0b00000100, 0b001001001); // unsigned maximum reduction to scalar
INSN(sve_umin, 0b00000100, 0b001011000); // unsigned minimum vectors
INSN(sve_uminv, 0b00000100, 0b001011001); // unsigned minimum reduction to scalar
#undef INSN
// SVE floating-point arithmetic - predicate
@ -4325,6 +4329,7 @@ public:
#undef INSN
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
}
// Stack overflow checking

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1960,50 +1960,76 @@ void C2_MacroAssembler::neon_reduce_logical(int opc, Register dst, BasicType bt,
BLOCK_COMMENT("} neon_reduce_logical");
}
// Vector reduction min/max for integral type with ASIMD instructions.
// Helper function to decode min/max reduction operation properties
void C2_MacroAssembler::decode_minmax_reduction_opc(int opc, bool* is_min,
bool* is_unsigned,
Condition* cond) {
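  // Note: LT/GT are the signed compare conditions; LO ("unsigned lower")
  // and HI ("unsigned higher") are their unsigned counterparts.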
switch(opc) {
case Op_MinReductionV:
*is_min = true; *is_unsigned = false; *cond = LT; break;
case Op_MaxReductionV:
*is_min = false; *is_unsigned = false; *cond = GT; break;
case Op_UMinReductionV:
*is_min = true; *is_unsigned = true; *cond = LO; break;
case Op_UMaxReductionV:
*is_min = false; *is_unsigned = true; *cond = HI; break;
default:
ShouldNotReachHere();
}
}
// Vector reduction min/max/umin/umax for integral type with ASIMD instructions.
// Note: vtmp is not used and expected to be fnoreg for T_LONG case.
// Clobbers: rscratch1, rflags
void C2_MacroAssembler::neon_reduce_minmax_integral(int opc, Register dst, BasicType bt,
Register isrc, FloatRegister vsrc,
unsigned vector_length_in_bytes,
FloatRegister vtmp) {
assert(opc == Op_MinReductionV || opc == Op_MaxReductionV, "unsupported");
assert(opc == Op_MinReductionV || opc == Op_MaxReductionV ||
opc == Op_UMinReductionV || opc == Op_UMaxReductionV, "unsupported");
assert(vector_length_in_bytes == 8 || vector_length_in_bytes == 16, "unsupported");
assert(bt == T_BYTE || bt == T_SHORT || bt == T_INT || bt == T_LONG, "unsupported");
assert_different_registers(dst, isrc);
bool isQ = vector_length_in_bytes == 16;
bool is_min = opc == Op_MinReductionV;
bool is_min;
bool is_unsigned;
Condition cond;
decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
BLOCK_COMMENT("neon_reduce_minmax_integral {");
if (bt == T_LONG) {
assert(vtmp == fnoreg, "should be");
assert(isQ, "should be");
umov(rscratch1, vsrc, D, 0);
cmp(isrc, rscratch1);
csel(dst, isrc, rscratch1, is_min ? LT : GT);
csel(dst, isrc, rscratch1, cond);
umov(rscratch1, vsrc, D, 1);
cmp(dst, rscratch1);
csel(dst, dst, rscratch1, is_min ? LT : GT);
csel(dst, dst, rscratch1, cond);
} else {
SIMD_Arrangement size = esize2arrangement((unsigned)type2aelembytes(bt), isQ);
if (size == T2S) {
is_min ? sminp(vtmp, size, vsrc, vsrc) : smaxp(vtmp, size, vsrc, vsrc);
// For T2S (2x32-bit elements), use pairwise instructions because
// uminv/umaxv/sminv/smaxv don't support arrangement 2S.
neon_minmaxp(is_unsigned, is_min, vtmp, size, vsrc, vsrc);
} else {
is_min ? sminv(vtmp, size, vsrc) : smaxv(vtmp, size, vsrc);
// For other sizes, use reduction to scalar instructions.
neon_minmaxv(is_unsigned, is_min, vtmp, size, vsrc);
}
if (bt == T_INT) {
umov(dst, vtmp, S, 0);
} else if (is_unsigned) {
umov(dst, vtmp, elemType_to_regVariant(bt), 0);
} else {
smov(dst, vtmp, elemType_to_regVariant(bt), 0);
}
cmpw(dst, isrc);
cselw(dst, dst, isrc, is_min ? LT : GT);
cselw(dst, dst, isrc, cond);
}
BLOCK_COMMENT("} neon_reduce_minmax_integral");
}
// Vector reduction for integral type with SVE instruction.
// Supported operations are Add, And, Or, Xor, Max, Min.
// Supported operations are Add, And, Or, Xor, Max, Min, UMax, UMin.
// rflags would be clobbered if opc is one of the signed or unsigned min/max reductions.
void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt, Register src1,
FloatRegister src2, PRegister pg, FloatRegister tmp) {
@ -2075,35 +2101,27 @@ void C2_MacroAssembler::sve_reduce_integral(int opc, Register dst, BasicType bt,
}
break;
}
case Op_MaxReductionV: {
sve_smaxv(tmp, size, pg, src2);
if (bt == T_INT || bt == T_LONG) {
case Op_MaxReductionV:
case Op_MinReductionV:
case Op_UMaxReductionV:
case Op_UMinReductionV: {
bool is_min;
bool is_unsigned;
Condition cond;
decode_minmax_reduction_opc(opc, &is_min, &is_unsigned, &cond);
sve_minmaxv(is_unsigned, is_min, tmp, size, pg, src2);
// Move result from vector to general register
if (is_unsigned || bt == T_INT || bt == T_LONG) {
umov(dst, tmp, size, 0);
} else {
smov(dst, tmp, size, 0);
}
if (bt == T_LONG) {
cmp(dst, src1);
csel(dst, dst, src1, Assembler::GT);
csel(dst, dst, src1, cond);
} else {
cmpw(dst, src1);
cselw(dst, dst, src1, Assembler::GT);
}
break;
}
case Op_MinReductionV: {
sve_sminv(tmp, size, pg, src2);
if (bt == T_INT || bt == T_LONG) {
umov(dst, tmp, size, 0);
} else {
smov(dst, tmp, size, 0);
}
if (bt == T_LONG) {
cmp(dst, src1);
csel(dst, dst, src1, Assembler::LT);
} else {
cmpw(dst, src1);
cselw(dst, dst, src1, Assembler::LT);
cselw(dst, dst, src1, cond);
}
break;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,37 @@
void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
// Helper functions for min/max reduction operations
void decode_minmax_reduction_opc(int opc, bool* is_min, bool* is_unsigned, Condition* cond);
void neon_minmaxp(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_Arrangement size, FloatRegister src1, FloatRegister src2) {
auto m = is_unsigned ? (is_min ? &Assembler::uminp : &Assembler::umaxp)
: (is_min ? &Assembler::sminp : &Assembler::smaxp);
(this->*m)(dst, size, src1, src2);
}
// Typedefs used to disambiguate overloaded member functions.
typedef void (Assembler::*neon_reduction2)
(FloatRegister, SIMD_Arrangement, FloatRegister);
void neon_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_Arrangement size, FloatRegister src) {
auto m = is_unsigned ? (is_min ? (neon_reduction2)&Assembler::uminv
: (neon_reduction2)&Assembler::umaxv)
: (is_min ? &Assembler::sminv
: &Assembler::smaxv);
(this->*m)(dst, size, src);
}
void sve_minmaxv(bool is_unsigned, bool is_min, FloatRegister dst,
SIMD_RegVariant size, PRegister pg, FloatRegister src) {
auto m = is_unsigned ? (is_min ? &Assembler::sve_uminv : &Assembler::sve_umaxv)
: (is_min ? &Assembler::sve_sminv : &Assembler::sve_smaxv);
(this->*m)(dst, size, pg, src);
}
void select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, unsigned vector_length_in_bytes);

View File

@ -209,6 +209,10 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value, int bit_mask) {
bs_asm->increment_patching_epoch();
}
// Enable WXWrite: the function is called directly from nmethod_entry_barrier
// stub.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
NativeNMethodBarrier barrier(nm);
barrier.set_value(value, bit_mask);
}

View File

@ -85,26 +85,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -358,20 +348,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@ -394,13 +384,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@ -409,20 +399,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
rscratch1 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}

View File

@ -40,23 +40,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -473,6 +473,7 @@ address MacroAssembler::target_addr_for_insn(address insn_addr) {
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
return RelocActions<Patcher>::run(insn_addr, target);
}
@ -481,6 +482,8 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
unsigned insn = *(unsigned*)insn_addr;
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// OOPs are either narrow (32 bits) or wide (48 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
@ -510,6 +513,8 @@ int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
return 2 * NativeInstruction::instruction_size;
@ -6419,10 +6424,14 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-

View File

@ -133,7 +133,6 @@ void NativeMovConstReg::verify() {
intptr_t NativeMovConstReg::data() const {
// das(uint64_t(instruction_address()),2);
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
if (maybe_cpool_ref(instruction_address())) {
return *(intptr_t*)addr;
@ -144,6 +143,7 @@ intptr_t NativeMovConstReg::data() const {
void NativeMovConstReg::set_data(intptr_t x) {
if (maybe_cpool_ref(instruction_address())) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
*(intptr_t*)addr = x;
} else {
@ -350,8 +350,6 @@ bool NativeInstruction::is_stop() {
//-------------------------------------------------------------------
void NativeGeneralJump::verify() { }
// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
ShouldNotCallThis();

View File

@ -90,16 +90,18 @@ protected:
s_char sbyte_at(int offset) const { return *(s_char*)addr_at(offset); }
u_char ubyte_at(int offset) const { return *(u_char*)addr_at(offset); }
jint int_at(int offset) const { return *(jint*)addr_at(offset); }
juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
address ptr_at(int offset) const { return *(address*)addr_at(offset); }
oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
jint int_at(int offset) const { return *(jint*)addr_at(offset); }
juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
address ptr_at(int offset) const { return *(address*)addr_at(offset); }
oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
void set_ptr_at(int offset, address ptr) { *(address*)addr_at(offset) = ptr; }
void set_oop_at(int offset, oop o) { *(oop*)addr_at(offset) = o; }
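// Patching code memory requires the writable W^X thread state on
// macOS/AArch64 (the code cache is mapped with MAP_JIT); elsewhere the
// macro below expands to nothing.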
#define MACOS_WX_WRITE MACOS_AARCH64_ONLY(os::thread_wx_enable_write())
void set_char_at(int offset, char c) { MACOS_WX_WRITE; *addr_at(offset) = (u_char)c; }
void set_int_at(int offset, jint i) { MACOS_WX_WRITE; *(jint*)addr_at(offset) = i; }
void set_uint_at(int offset, jint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; }
void set_ptr_at(int offset, address ptr) { MACOS_WX_WRITE; *(address*)addr_at(offset) = ptr; }
void set_oop_at(int offset, oop o) { MACOS_WX_WRITE; *(oop*)addr_at(offset) = o; }
#undef MACOS_WX_WRITE
void wrote(int offset);
@ -380,7 +382,6 @@ public:
void set_jump_destination(address dest);
static void replace_mt_safe(address instr_addr, address code_buffer);
static void verify();
};
inline NativeGeneralJump* nativeGeneralJump_at(address address) {

View File

@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
// (parsedLength or (parsedLength - 48) must be divisible by 64.)
// we assume that parsed and condensed are allocated such that for
// n = (parsedLength + 63) / 64
// n blocks of 96 bytes of input can be processed, i.e.
// index + n * 96 <= condensed.length and
// n * 64 <= parsed.length
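// e.g. (illustrative): parsedLength = 112 gives n = (112 + 63) / 64 = 2,
// so the caller must guarantee index + 2 * 96 <= condensed.length and
// 2 * 64 = 128 <= parsed.length.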
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
// parsed (short[112 or 256]) = c_rarg2
// parsedLength (112 or 256) = c_rarg3
// parsed (short[]) = c_rarg2
// parsedLength = c_rarg3
address generate_kyber12To16() {
Label L_F00, L_loop, L_end;
Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
__ cmp(parsedLength, (u1)64);
__ br(Assembler::GE, L_loop);
__ cbz(parsedLength, L_end);
// if anything is left it should be a final 72 bytes of input
// i.e. a final 48 12-bit values. so we handle this by loading
// 48 bytes into all 16B lanes of front(vin) and only 24
// bytes into the lower 8B lane of back(vin)
vs_ld3_post(vs_front(vin), __ T16B, condensed);
vs_ld3(vs_back(vin), __ T8B, condensed);
// Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
// n.b. target elements 2 and 3 of va duplicate elements 4 and
// 5 and target element 2 of vb duplicates element 4.
__ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
__ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
__ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
__ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
// This time expand just the lower 8 lanes
__ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
__ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
__ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
// shift lo byte of copy 1 of the middle stripe into the high byte
__ shl(va[2], __ T8H, va[2], 8);
__ shl(va[3], __ T8H, va[3], 8);
__ shl(vb[2], __ T8H, vb[2], 8);
// expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
// vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
// int are in bit positions [4..11].
__ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
__ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
__ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
// mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
// shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
// copy2
__ andr(va[2], __ T16B, va[2], v31);
__ andr(va[3], __ T16B, va[3], v31);
__ ushr(va[4], __ T8H, va[4], 4);
__ ushr(va[5], __ T8H, va[5], 4);
__ andr(vb[2], __ T16B, vb[2], v31);
__ ushr(vb[4], __ T8H, vb[4], 4);
// sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
// hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
// n.b. ordering ensures: i) inputs are consumed before they are
// overwritten ii) order of 16-bit results across successive
// pairs of vectors in va and then lower half of vb reflects order
// of corresponding 12-bit inputs
__ addv(va[0], __ T8H, va[0], va[2]);
__ addv(va[2], __ T8H, va[1], va[3]);
__ addv(va[1], __ T8H, va[4], va[6]);
__ addv(va[3], __ T8H, va[5], va[7]);
__ addv(vb[0], __ T8H, vb[0], vb[2]);
__ addv(vb[1], __ T8H, vb[4], vb[6]);
// store 48 results interleaved as shorts
vs_st2_post(vs_front(va), __ T8H, parsed);
vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
__ BIND(L_end);
__ cmp(parsedLength, (u1)0);
__ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0
@ -11805,7 +11742,9 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_setMemory)) {
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory();
}
StubRoutines::aarch64::set_completed(); // Indicate that arraycopy and zero_blocks stubs are generated
}

View File

@ -201,16 +201,14 @@ void VM_Version::initialize() {
}
}
// Cortex A53
if (_cpu == CPU_ARM && model_is(0xd03)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A53)) {
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
}
// Cortex A73
if (_cpu == CPU_ARM && model_is(0xd09)) {
if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A73)) {
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
}
@ -220,16 +218,11 @@ void VM_Version::initialize() {
}
}
// Neoverse
// N1: 0xd0c
// N2: 0xd49
// N3: 0xd8e
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
model_is(0xd40) || model_is(0xd4f) ||
model_is(0xd8e) || model_is(0xd84))) {
if (_cpu == CPU_ARM &&
model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1,
CPU_MODEL_ARM_NEOVERSE_N2, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_N3, CPU_MODEL_ARM_NEOVERSE_V3,
CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@ -261,12 +254,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32, false);
}
// Neoverse
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM &&
(model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
CPU_MODEL_ARM_NEOVERSE_V3, CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
@ -632,6 +622,22 @@ void VM_Version::initialize() {
check_virtualizations();
#ifdef __APPLE__
DefaultWXWriteMode = UseOldWX ? WXWrite : WXArmedForWrite;
if (TraceWXHealing) {
if (pthread_jit_write_protect_supported_np()) {
tty->print_cr("### TraceWXHealing is in use");
if (StressWXHealing) {
tty->print_cr("### StressWXHealing is in use");
}
} else {
tty->print_cr("WX Healing is not in use because MAP_JIT write protection "
"does not work on this system.");
}
}
#endif
// Sync SVE related CPU features with flags
if (UseSVE < 2) {
clear_feature(CPU_SVE2);

View File

@ -30,6 +30,8 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
#include <initializer_list>
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
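// e.g. BIT_MASK(3) == 0x8; yields the single-bit mask for the given flag index.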
@ -112,14 +114,26 @@ public:
CPU_APPLE = 'a',
};
enum Ampere_CPU_Model {
enum Ampere_CPU_Model {
CPU_MODEL_EMAG = 0x0, /* CPU implementer is CPU_AMCC */
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
};
};
enum ARM_CPU_Model {
CPU_MODEL_ARM_CORTEX_A53 = 0xd03,
CPU_MODEL_ARM_CORTEX_A73 = 0xd09,
CPU_MODEL_ARM_NEOVERSE_N1 = 0xd0c,
CPU_MODEL_ARM_NEOVERSE_V1 = 0xd40,
CPU_MODEL_ARM_NEOVERSE_N2 = 0xd49,
CPU_MODEL_ARM_NEOVERSE_V2 = 0xd4f,
CPU_MODEL_ARM_NEOVERSE_V3AE = 0xd83,
CPU_MODEL_ARM_NEOVERSE_V3 = 0xd84,
CPU_MODEL_ARM_NEOVERSE_N3 = 0xd8e,
};
#define CPU_FEATURE_FLAGS(decl) \
decl(FP, fp, 0) \
@ -181,6 +195,15 @@ enum Ampere_CPU_Model {
return _model == cpu_model || _model2 == cpu_model;
}
static bool model_is_in(std::initializer_list<int> cpu_models) {
for (const int& cpu_model : cpu_models) {
if (_model == cpu_model || _model2 == cpu_model) {
return true;
}
}
return false;
}
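// e.g. model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1 })
// returns true when either detected model (_model or _model2) is an N1 or a V1.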
static bool is_zva_enabled() { return 0 <= _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");

View File

@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@ -2424,6 +2427,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);

View File

@ -923,6 +923,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }

View File

@ -664,3 +664,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
// Works for single and double precision floats.
// dst = (op1 cmp(cc) op2) ? src1 : src2;
// Unordered semantics are the same as for CmpF3Node/CmpD3Node which implement the fcmpl/dcmpl bytecodes.
// Comparing unordered values has the same result as when op1 is less than op2.
// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
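// e.g. (illustration only) for cc meaning "<" with op1 == NaN: the compare is
// unordered, which is treated like op1 < op2, so the condition holds and
// dst = src1, matching fcmpl's result of -1 for NaN operands.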
void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
// See operand cmpOp() for details.
bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
auto cmp = (Assembler::Condition)(cc & 3);
switch(cmp) {
case Assembler::Condition::equal:
// Use false_result if "unordered".
xscmpeqdp(tmp, op1, op2);
break;
case Assembler::Condition::greater:
// Use false_result if "unordered".
xscmpgtdp(tmp, op1, op2);
break;
case Assembler::Condition::less:
// Use true_result if "unordered".
xscmpgedp(tmp, op1, op2);
invert_cond = !invert_cond;
break;
default:
assert(false, "unsupported compare condition: %d", cc);
ShouldNotReachHere();
}
VectorSRegister true_result = invert_cond ? src2 : src1;
VectorSRegister false_result = invert_cond ? src1 : src2;
xxsel(dst, false_result, true_result, tmp);
}

View File

@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP

View File

@ -50,14 +50,14 @@
#define __ masm->
void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
__ block_comment("satb_write_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_write_barrier (shenandoahgc)");
__ block_comment("satb_barrier (shenandoahgc) {");
satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_barrier (shenandoahgc)");
}
}
@ -198,11 +198,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@ -574,13 +575,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@ -603,21 +604,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (is_reference_type(type)) {
if (ShenandoahSATBBarrier) {
satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
return;
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
// No need for post barrier if storing null
if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
store_check(masm, base, ind_or_offs, tmp1);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, base, ind_or_offs, tmp1);
}
}

View File

@ -45,15 +45,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void store_check(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void card_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@ -85,10 +85,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
void satb_write_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,

View File

@ -64,12 +64,10 @@
return true;
}
// Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
// Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
// fsel doesn't accept a condition register as input, so this would be slightly different.
static int float_cmove_cost() { return ConditionalMoveLimit; }
// Suppress CMOVF for Power8 because there are no fast nodes.
static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched

View File

@ -3024,7 +3024,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
// use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@ -3099,7 +3098,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
// use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@ -6618,7 +6616,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@ -7293,7 +7290,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7313,7 +7309,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7326,6 +7321,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@ -8492,7 +8551,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -8551,7 +8609,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -10262,7 +10319,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10276,7 +10332,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10439,7 +10494,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10453,7 +10507,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -11080,7 +11133,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -775,7 +775,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
return stk;
}
#if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
@ -913,7 +912,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
return MAX2(arg, 8) * 2 + additional_frame_header_slots;
#endif
}
#endif // COMPILER2
int SharedRuntime::vector_calling_convention(VMRegPair *regs,
uint num_bits,
@ -2874,7 +2872,6 @@ void SharedRuntime::generate_deopt_blob() {
CodeBuffer buffer(name, 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
Label exec_mode_initialized;
int frame_size_in_words;
OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
@ -2886,6 +2883,9 @@ void SharedRuntime::generate_deopt_blob() {
const Register exec_mode_reg = R21_tmp1;
const address start = __ pc();
int exception_offset = 0;
int exception_in_tls_offset = 0;
int reexecute_offset = 0;
#if defined(COMPILER1) || defined(COMPILER2)
// --------------------------------------------------------------------------
@ -2925,7 +2925,7 @@ void SharedRuntime::generate_deopt_blob() {
// - R3_ARG1: exception oop
// - R4_ARG2: exception pc
int exception_offset = __ pc() - start;
exception_offset = __ pc() - start;
BLOCK_COMMENT("Prolog for exception case");
@ -2936,7 +2936,7 @@ void SharedRuntime::generate_deopt_blob() {
__ std(R4_ARG2, _abi0(lr), R1_SP);
// Vanilla deoptimization with an exception pending in exception_oop.
int exception_in_tls_offset = __ pc() - start;
exception_in_tls_offset = __ pc() - start;
// Push the "unpack frame".
// Save everything in sight.
@ -2949,8 +2949,6 @@ void SharedRuntime::generate_deopt_blob() {
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
// fall through
int reexecute_offset = 0;
#ifdef COMPILER1
__ b(exec_mode_initialized);
@ -3068,11 +3066,12 @@ void SharedRuntime::generate_deopt_blob() {
// Return to the interpreter entry point.
__ blr();
__ flush();
#else // COMPILER2
#else // !defined(COMPILER1) && !defined(COMPILER2)
__ unimplemented("deopt blob needed only with compiler");
int exception_offset = __ pc() - start;
#endif // COMPILER2
#endif
// Make sure all code is generated
__ flush();
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
reexecute_offset, first_frame_size_in_bytes / wordSize);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2813,10 +2813,14 @@ void C2_MacroAssembler::char_array_compress_v(Register src, Register dst, Regist
// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
// return the number of characters copied.
// - java/lang/StringUTF16.compress
// return index of non-latin1 character if copy fails, otherwise 'len'.
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
//
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
// Encodes char[] to byte[] in ASCII
//
// This version always returns the number of characters copied. A successful
// copy will complete with the post-condition: 'res' == 'len', while an

View File

@ -88,26 +88,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -376,21 +366,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@ -413,13 +403,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@ -428,20 +418,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
t0 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}

View File

@ -41,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
__ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
__ ld(x12, at_tos_p1()); // index
__ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
__ ld(x10, iaddress(x12, x10, _masm));
__ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
__ ld(x10, iaddress(x12, t0, _masm));
__ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {

View File

@ -174,24 +174,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -533,18 +523,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@ -575,41 +565,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// We do it regardless of precise because we need the registers
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
__ lea(tmp1, dst);
}
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (needs_pre_barrier) {
shenandoah_write_barrier_pre(masm /*masm*/,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
}
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
if (val != noreg) {
if (ShenandoahCardBarrier) {
store_check(masm, tmp1);
}
// Flatten object address right away for simplicity: likely needed by barriers
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
__ lea(tmp1, dst);
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp1);
}
}

View File

@ -41,21 +41,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,

View File

@ -6251,32 +6251,46 @@ void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src,
}
}
// encode char[] to byte[] in ISO_8859_1 or ASCII
//@IntrinsicCandidate
//private static int implEncodeISOArray(byte[] sa, int sp,
//byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
//
//@IntrinsicCandidate
//private static int implEncodeAsciiArray(char[] sa, int sp,
// byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte)c;
// }
// return i;
//}
// Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII
//
// @IntrinsicCandidate
// int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeISOArray0(
// byte[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = StringUTF16.getChar(sa, sp++);
// if (c > '\u00FF')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
//
// @IntrinsicCandidate
// int java.lang.StringCoding.encodeAsciiArray0(
// char[] sa, int sp, byte[] da, int dp, int len) {
// int i = 0;
// for (; i < len; i++) {
// char c = sa[sp++];
// if (c >= '\u0080')
// break;
// da[dp++] = (byte) c;
// }
// return i;
// }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,

View File

@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
//
// Query CPUID 0xD.19 for APX XSAVE offset
// Extended State Enumeration Sub-leaf 19 (APX)
// EAX = size of APX state (should be 128)
// EBX = offset in standard XSAVE format
//
__ movl(rax, 0xD);
__ movl(rcx, 19);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
__ movl(Address(rsi, 0), rax);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
__ movl(Address(rsi, 0), rbx);
UseAPX = save_apx;
__ bind(vector_save_restore);
//
@ -1138,6 +1152,10 @@ void VM_Version::get_processor_features() {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES_CTR intrinsics require UseAES flag to be enabled. AES_CTR intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (UseSSE > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
@ -1156,8 +1174,8 @@ void VM_Version::get_processor_features() {
if (!UseAESIntrinsics) {
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
} else {
if (supports_sse4_1()) {
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
@ -1177,16 +1195,16 @@ void VM_Version::get_processor_features() {
} else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
FLAG_SET_DEFAULT(UseAES, false);
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
warning("AES-CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
// Use CLMUL instructions if available.

View File

@ -676,6 +676,10 @@ protected:
// Space to save apx registers after signal handle
jlong apx_save[2]; // Save r16 and r31
// cpuid function 0xD, subleaf 19 (APX extended state)
uint32_t apx_xstate_size; // EAX: size of APX state (128)
uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
VM_Features feature_flags() const;
// Asserts
@ -739,6 +743,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check ymm register after signal handle
static int ymm_test_value() { return 0xCAFEBABE; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -38,7 +38,7 @@ class AIXDecoder: public AbstractDecoder {
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();

View File

@ -703,7 +703,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, kernel thread id: %zu).",
os::current_thread_id(), (uintx) kernel_thread_id);
return 0;
return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ class fixed_strings {
public:
fixed_strings() : first(0) {}
fixed_strings() : first(nullptr) {}
~fixed_strings() {
node* n = first;
while (n) {
@ -113,7 +113,7 @@ bool AixSymbols::get_function_name (
// information (null if not available)
bool demangle // [in] whether to demangle the name
) {
struct tbtable* tb = 0;
struct tbtable* tb = nullptr;
unsigned int searchcount = 0;
// initialize output parameters
@ -653,10 +653,10 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
codeptr_t cur_iar = 0;
stackptr_t cur_sp = 0;
codeptr_t cur_rtoc = 0;
codeptr_t cur_lr = 0;
codeptr_t cur_iar = nullptr;
stackptr_t cur_sp = nullptr;
codeptr_t cur_rtoc = nullptr;
codeptr_t cur_lr = nullptr;
const ucontext_t* uc = (const ucontext_t*) context;
@ -926,7 +926,7 @@ static struct handletableentry* p_handletable = nullptr;
static const char* rtv_linkedin_libpath() {
constexpr int bufsize = 4096;
static char buffer[bufsize];
static const char* libpath = 0;
static const char* libpath = nullptr;
// we only try to retrieve the libpath once. After that try we
// let libpath point to buffer, which then contains a valid libpath

View File

@ -28,6 +28,7 @@
//
// Declare Bsd specific flags. They are not available on other platforms.
//
#ifdef AARCH64
#define RUNTIME_OS_FLAGS(develop, \
develop_pd, \
product, \
@ -35,9 +36,21 @@
range, \
constraint) \
\
AARCH64_ONLY(develop(bool, AssertWXAtThreadSync, true, \
"Conservatively check W^X thread state at possible safepoint " \
"or handshake"))
develop(bool, TraceWXHealing, false, \
"track occurrences of W^X mode healing") \
develop(bool, UseOldWX, false, \
"Choose old W^X implementation.") \
product(bool, StressWXHealing, false, DIAGNOSTIC, \
"Stress W xor X healing on macOS")
#else
#define RUNTIME_OS_FLAGS(develop, \
develop_pd, \
product, \
product_pd, \
range, \
constraint)
#endif
// end of RUNTIME_OS_FLAGS

View File

@ -841,6 +841,7 @@ jlong os::javaTimeNanos() {
// We might also condition (c) on the magnitude of the delta between obsv and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
// https://web.archive.org/web/20131214182431/https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv;
}

View File

@ -54,8 +54,11 @@
#include "signals_posix.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/events.hpp"
#include "utilities/nativeStackPrinter.hpp"
#include "utilities/vmError.hpp"
#include "compiler/disassembler.hpp"
// put OS-includes here
# include <sys/types.h>
@ -85,6 +88,8 @@
#define SPELL_REG_SP "sp"
#ifdef __APPLE__
WXMode DefaultWXWriteMode;
// see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes
@ -233,19 +238,56 @@ NOINLINE frame os::current_frame() {
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {
// Enable WXWrite: this function is called by the signal handler at arbitrary
// point of execution.
ThreadWXEnable wx(WXWrite, thread);
// decide if this trap can be handled by a stub
address stub = nullptr;
address pc = nullptr;
address pc = nullptr;
//%note os_trap_1
if (info != nullptr && uc != nullptr && thread != nullptr) {
pc = (address) os::Posix::ucontext_get_pc(uc);
#ifdef MACOS_AARCH64
// If we got a SIGBUS because we tried to write into the code
// cache, try enabling WXWrite mode.
if (sig == SIGBUS
&& pc != info->si_addr
&& CodeCache::contains(info->si_addr)
&& os::address_is_in_vm(pc)) {
WXMode *entry_mode = thread->_cur_wx_mode;
if (entry_mode != nullptr && *entry_mode == WXArmedForWrite) {
if (TraceWXHealing) {
static const char *mode_names[3] = {"WXWrite", "WXExec", "WXArmedForWrite"};
tty->print("Healing WXMode %s at %p to WXWrite",
mode_names[*entry_mode], entry_mode);
char name[128];
int offset = 0;
if (os::dll_address_to_function_name(pc, name, sizeof name, &offset)) {
tty->print_cr(" (%s+0x%x)", name, offset);
} else {
tty->cr();
}
if (Verbose) {
char buf[O_BUFLEN];
NativeStackPrinter nsp(thread);
nsp.print_stack(tty, buf, sizeof(buf), pc,
true /* print_source_info */, -1 /* max stack */);
}
}
#ifndef PRODUCT
guarantee(StressWXHealing,
"We should not reach here unless StressWXHealing");
#endif
*(thread->_cur_wx_mode) = WXWrite;
return thread->wx_enable_write();
}
}
// There may be cases where code after this point that we call
// from the signal handler changes WX state, so we protect against
// that by saving and restoring the state.
ThreadWXEnable wx(thread->get_wx_state(), thread);
#endif
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
address addr = (address) info->si_addr;
@ -515,11 +557,42 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
#ifdef __APPLE__
#ifdef MACOS_AARCH64
THREAD_LOCAL bool os::_jit_exec_enabled;
// This is a wrapper around the standard library function
// pthread_jit_write_protect_np(3). We keep track of the state of
// per-thread write protection on the MAP_JIT region in the
// thread-local variable os::_jit_exec_enabled
void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
bool exec_enabled = mode != WXWrite;
if (exec_enabled != _jit_exec_enabled NOT_PRODUCT( || DefaultWXWriteMode == WXWrite)) {
permit_forbidden_function::pthread_jit_write_protect_np(exec_enabled);
_jit_exec_enabled = exec_enabled;
}
}
#endif
// If the current thread is in the WX state WXArmedForWrite, change
// the state to WXWrite.
bool Thread::wx_enable_write() {
if (_wx_state == WXArmedForWrite) {
_wx_state = WXWrite;
os::current_thread_enable_wx(WXWrite);
return true;
} else {
return false;
}
}
// A wrapper around wx_enable_write() for when the current thread is
// not known.
void os::thread_wx_enable_write_impl() {
if (!StressWXHealing) {
Thread::current()->wx_enable_write();
}
}
#endif // MACOS_AARCH64
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;

View File

@ -42,6 +42,16 @@ frame JavaThread::pd_last_frame() {
void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
_card_table_base = nullptr;
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
_card_table_base = nullptr;
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
_card_table_base = (address)ctbs->card_table_base_const();

View File

@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "runtime/vm_version.hpp"
// put OS-includes here
# include <sys/types.h>
@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
#define XSAVE_HDR_OFFSET 512
#define XFEATURE_APX (1ULL << 19)
// XSAVE header structure
// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
// Also: Linux kernel arch/x86/include/asm/fpu/types.h
struct xstate_header {
uint64_t xfeatures;
uint64_t xcomp_bv;
uint64_t reserved[6];
};
// APX extended state - R16-R31 (16 x 64-bit registers)
// See: Intel APX Architecture Specification
struct apx_state {
uint64_t regs[16]; // r16-r31
};
static apx_state* get_apx_state(const ucontext_t* uc) {
uint32_t offset = VM_Version::apx_xstate_offset();
if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
return nullptr;
}
char* xsave = (char*)uc->uc_mcontext.fpregs;
xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
// Check if APX state is present in this context
if (!(hdr->xfeatures & XFEATURE_APX)) {
return nullptr;
}
return (apx_state*)(xsave + offset);
}
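// e.g. (hypothetical offset) if CPUID 0xD.19 reported EBX == 0x340, r16 would
// be read from fpregs + 0x340 and r31 from fpregs + 0x340 + 15 * 8.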
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
// Dump APX EGPRs (R16-R31)
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
if (apx != nullptr) {
for (int i = 0; i < 16; i++) {
st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
if (i % 4 == 3) st->cr();
}
}
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
const int register_count = 16;
if (context == nullptr) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
if (context == nullptr || n == register_count) {
if (n == register_count) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
if (n < 16) {
// Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
# undef CASE_PRINT_REG
} else {
// APX extended general purpose registers (R16-R31)
st->print("R%d=", n);
print_location(st, apx->regs[n - 16]);
}
++n;
}
}

View File

@ -98,6 +98,8 @@ CodeBuffer::CodeBuffer(const CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(
}
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// Always allow for empty slop around each section.
int slop = (int) CodeSection::end_slop();

View File

@ -541,6 +541,7 @@ extern void vm_exit(int code);
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
MACOS_AARCH64_ONLY(current->wx_enable_write());
Handle exception(current, ex);
// This function is called when we are about to throw an exception. Therefore,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,10 +360,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
if (!CDSConfig::is_using_archive()) {
assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@ -466,7 +464,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
StringTable::load_shared_strings_array();
if (CDSConfig::is_dumping_final_static_archive()) {
StringTable::move_shared_strings_into_runtime_table();
}
}
}

View File

@ -1104,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
assert(CDSConfig::allow_only_single_java_thread(), "Required");
if (!CDSConfig::is_dumping_preimage_static_archive()) {
// A single thread is required for Reference handling and deterministic CDS archive.
// It's not required for dumping the preimage, where References won't be archived and
// determinism is not needed.
assert(CDSConfig::allow_only_single_java_thread(), "Required");
}
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@ -1162,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
if (HeapShared::is_writing_mapping_mode()) {
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
}
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
// We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
// is not running. Otherwise the checks for next/discovered may not work.
precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
// The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
// is not running.
//
// This code is called on every object found by AOTArtifactFinder. When dumping the
// preimage archive, AOTArtifactFinder should not find any Reference objects.
precond(!CDSConfig::is_dumping_preimage_static_archive());
precond(CDSConfig::allow_only_single_java_thread());
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -556,7 +556,9 @@ void CDSConfig::check_aotmode_record() {
// At VM exit, the module graph may be contaminated with program states.
// We will rebuild the module graph when dumping the CDS final image.
disable_heap_dumping();
_is_using_optimized_module_handling = false;
_is_using_full_module_graph = false;
_is_dumping_full_module_graph = false;
}
void CDSConfig::check_aotmode_create() {
@ -582,6 +584,7 @@ void CDSConfig::check_aotmode_create() {
substitute_aot_filename(FLAG_MEMBER_ENUM(AOTCache));
_is_dumping_final_static_archive = true;
_is_using_full_module_graph = false;
UseSharedSpaces = true;
RequireSharedSpaces = true;
@ -954,7 +957,9 @@ bool CDSConfig::are_vm_options_incompatible_with_dumping_heap() {
}
bool CDSConfig::is_dumping_heap() {
if (!(is_dumping_classic_static_archive() || is_dumping_final_static_archive())
// Note: when dumping the preimage static archive, only a very limited set of oops
// are dumped.
if (!is_dumping_static_archive()
|| are_vm_options_incompatible_with_dumping_heap()
|| _disable_heap_dumping) {
return false;
@ -966,6 +971,26 @@ bool CDSConfig::is_loading_heap() {
return HeapShared::is_archived_heap_in_use();
}
bool CDSConfig::is_dumping_klass_subgraphs() {
if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
// KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
// has been superseded by AOT class linking. This feature is used only when
// AOT class linking is disabled.
//
// KlassSubGraphs are disabled in the preimage static archive, which contains a very
// limited set of oops.
return is_dumping_heap() && !is_dumping_aot_linked_classes();
} else {
return false;
}
}
bool CDSConfig::is_using_klass_subgraphs() {
return (is_loading_heap() &&
!CDSConfig::is_using_aot_linked_classes() &&
!CDSConfig::is_dumping_final_static_archive());
}
bool CDSConfig::is_using_full_module_graph() {
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -188,6 +188,9 @@ public:
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_using_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);

View File

@ -210,7 +210,7 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
@ -413,6 +413,8 @@ void HeapShared::materialize_thread_object() {
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
@ -453,7 +455,6 @@ int HeapShared::append_root(oop obj) {
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
oop result;
@ -598,8 +599,7 @@ public:
void set_oop(MetaspaceObj* ptr, oop o) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle handle(Universe::vm_global(), o);
bool is_new = put(ptr, handle);
assert(is_new, "cannot set twice");
put_when_absent(ptr, handle);
}
void remove_oop(MetaspaceObj* ptr) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
@ -612,6 +612,11 @@ public:
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
// We are in an AOT training run. The class has been redefined and we are giving it a new resolved_reference.
// Ignore it, as this class will be excluded from the AOT config.
return;
}
if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
_scratch_objects_table->set_oop(src, dest);
}
@ -831,14 +836,6 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
return nullptr;
}
void HeapShared::archive_strings() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
oop shared_strings_array = StringTable::init_shared_strings_array();
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
int HeapShared::archive_exception_instance(oop exception) {
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
assert(success, "sanity");
@ -890,7 +887,7 @@ void HeapShared::start_scanning_for_oops() {
void HeapShared::end_scanning_for_oops() {
if (is_writing_mapping_mode()) {
archive_strings();
StringTable::init_shared_table();
}
delete_seen_objects_table();
}
@ -940,7 +937,7 @@ void HeapShared::scan_java_class(Klass* orig_k) {
void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
@ -1298,10 +1295,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (!is_archived_heap_in_use()) {
return; // nothing to do
}
if (!CDSConfig::is_using_aot_linked_classes()) {
if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
@ -1391,7 +1385,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
if (!is_archived_heap_in_use()) {
if (!CDSConfig::is_using_klass_subgraphs()) {
return; // nothing to do
}
@ -1867,7 +1861,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -1918,7 +1912,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
};
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -2144,7 +2138,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(CDSConfig::is_dumping_heap(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -478,7 +478,6 @@ private:
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
static void archive_strings();
static void archive_subgraphs();
static void copy_java_mirror(oop orig_mirror, oop scratch_m);

View File

@ -127,6 +127,7 @@ PerfCounter* ClassLoader::_perf_ik_link_methods_count = nullptr;
PerfCounter* ClassLoader::_perf_method_adapters_count = nullptr;
PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = nullptr;
PerfCounter* ClassLoader::_perf_secondary_hash_time = nullptr;
PerfCounter* ClassLoader::_perf_change_wx_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_indy_time = nullptr;
PerfCounter* ClassLoader::_perf_resolve_invokehandle_time = nullptr;
@ -1370,6 +1371,7 @@ void ClassLoader::initialize(TRAPS) {
NEWPERFBYTECOUNTER(_perf_sys_classfile_bytes_read, SUN_CLS, "sysClassBytes");
NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS, "unsafeDefineClassCalls");
NEWPERFTICKCOUNTER(_perf_secondary_hash_time, SUN_CLS, "secondarySuperHashTime");
NEWPERFTICKCOUNTER(_perf_change_wx_time, SUN_CLS, "changeWXTime");
if (log_is_enabled(Info, perf, class, link)) {
NEWPERFTICKCOUNTER(_perf_ik_link_methods_time, SUN_CLS, "linkMethodsTime");
@ -1418,6 +1420,10 @@ char* ClassLoader::lookup_vm_options() {
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
JImage_file =(*JImageOpen)(modules_path, &error);
if (JImage_file == nullptr) {
if (Arguments::has_jimage()) {
// The modules file exists but is unreadable or corrupt
vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
}
return nullptr;
}

View File

@ -184,6 +184,7 @@ class ClassLoader: AllStatic {
// Count the time taken to hash the secondary superclass arrays.
static PerfCounter* _perf_secondary_hash_time;
static PerfCounter* _perf_change_wx_time;
// The boot class path consists of 3 ordered pieces:
// 1. the module/path pairs specified to --patch-module
@ -268,6 +269,9 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_secondary_hash_time() {
return _perf_secondary_hash_time;
}
static PerfCounter* perf_change_wx_time() {
return _perf_change_wx_time;
}
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
static PerfCounter* perf_app_classload_selftime() { return _perf_app_classload_selftime; }

View File

@ -1263,6 +1263,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
"Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
}
if (CDSConfig::is_dumping_heap()) {
create_scratch_mirror(k, CHECK_(false));
}
return true;
}
#endif // INCLUDE_CDS_JAVA_HEAP
@ -1904,16 +1908,16 @@ oop java_lang_Thread::park_blocker(oop java_thread) {
return java_thread->obj_field_access<MO_RELAXED>(_park_blocker_offset);
}
// Obtain stack trace for platform or mounted virtual thread.
// If jthread is a virtual thread and it has been unmounted (or remounted to a different carrier) the method returns null.
// The caller (java.lang.VirtualThread) handles returned nulls via retry.
// Obtain stack trace for a platform or virtual thread.
oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
ThreadsListHandle tlh(THREAD);
JavaThread* java_thread = nullptr;
oop thread_oop;
oop thread_oop = nullptr;
bool has_java_thread = tlh.cv_internal_thread_to_JavaThread(jthread, &java_thread, &thread_oop);
if (!has_java_thread) {
assert(thread_oop != nullptr, "Missing Thread oop");
bool is_virtual = java_lang_VirtualThread::is_instance(thread_oop);
if (!has_java_thread && !is_virtual) {
return nullptr;
}
@ -1921,12 +1925,11 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
public:
const Handle _thread_h;
int _depth;
bool _retry_handshake;
GrowableArray<Method*>* _methods;
GrowableArray<int>* _bcis;
GetStackTraceHandshakeClosure(Handle thread_h) :
HandshakeClosure("GetStackTraceHandshakeClosure"), _thread_h(thread_h), _depth(0), _retry_handshake(false),
HandshakeClosure("GetStackTraceHandshakeClosure"), _thread_h(thread_h), _depth(0),
_methods(nullptr), _bcis(nullptr) {
}
~GetStackTraceHandshakeClosure() {
@ -1934,37 +1937,15 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
delete _bcis;
}
bool read_reset_retry() {
bool ret = _retry_handshake;
// If we re-execute the handshake this method need to return false
// when the handshake cannot be performed. (E.g. thread terminating)
_retry_handshake = false;
return ret;
}
void do_thread(Thread* th) {
if (!Thread::current()->is_Java_thread()) {
_retry_handshake = true;
JavaThread* java_thread = th != nullptr ? JavaThread::cast(th) : nullptr;
if (java_thread != nullptr && !java_thread->has_last_Java_frame()) {
// stack trace is empty
return;
}
JavaThread* java_thread = JavaThread::cast(th);
if (!java_thread->has_last_Java_frame()) {
return;
}
bool carrier = false;
if (java_lang_VirtualThread::is_instance(_thread_h())) {
// Ensure _thread_h is still mounted to java_thread.
const ContinuationEntry* ce = java_thread->vthread_continuation();
if (ce == nullptr || ce->cont_oop(java_thread) != java_lang_VirtualThread::continuation(_thread_h())) {
// Target thread has been unmounted.
return;
}
} else {
carrier = (java_thread->vthread_continuation() != nullptr);
}
bool is_virtual = java_lang_VirtualThread::is_instance(_thread_h());
bool vthread_carrier = !is_virtual && (java_thread->vthread_continuation() != nullptr);
const int max_depth = MaxJavaStackTraceDepth;
const bool skip_hidden = !ShowHiddenFrames;
@ -1975,7 +1956,10 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
_bcis = new (mtInternal) GrowableArray<int>(init_length, mtInternal);
int total_count = 0;
for (vframeStream vfst(java_thread, false, false, carrier); // we don't process frames as we don't care about oops
vframeStream vfst(java_thread != nullptr
? vframeStream(java_thread, false, false, vthread_carrier) // we don't process frames as we don't care about oops
: vframeStream(java_lang_VirtualThread::continuation(_thread_h())));
for (;
!vfst.at_end() && (max_depth == 0 || max_depth != total_count);
vfst.next()) {
@ -1997,9 +1981,11 @@ oop java_lang_Thread::async_get_stack_trace(jobject jthread, TRAPS) {
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
GetStackTraceHandshakeClosure gsthc(Handle(THREAD, thread_oop));
do {
Handshake::execute(&gsthc, &tlh, java_thread);
} while (gsthc.read_reset_retry());
if (is_virtual) {
Handshake::execute(&gsthc, thread_oop);
} else {
Handshake::execute(&gsthc, &tlh, java_thread);
}
// Stop if no stack trace is found.
if (gsthc._depth == 0) {
@ -2196,7 +2182,7 @@ void java_lang_VirtualThread::set_timeout(oop vthread, jlong value) {
JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state) {
JavaThreadStatus status = JavaThreadStatus::NEW;
switch (state & ~SUSPENDED) {
switch (state) {
case NEW:
status = JavaThreadStatus::NEW;
break;

View File

@ -592,9 +592,6 @@ class java_lang_VirtualThread : AllStatic {
TIMED_WAITING = 17,
TIMED_WAIT = 18, // waiting in timed-Object.wait
TERMINATED = 99,
// additional state bits
SUSPENDED = 1 << 8, // suspended when unmounted
};
static void compute_offsets();

View File

@ -74,24 +74,9 @@ const size_t REHASH_LEN = 100;
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;
#if INCLUDE_CDS_JAVA_HEAP
bool StringTable::_is_two_dimensional_shared_strings_array = false;
OopHandle StringTable::_shared_strings_array;
int StringTable::_shared_strings_array_root_index;
inline oop StringTable::read_string_from_compact_hashtable(address base_address, u4 index) {
assert(AOTMappedHeapLoader::is_in_use(), "sanity");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
oop s;
if (!_is_two_dimensional_shared_strings_array) {
s = array->obj_at((int)index);
} else {
int primary_index = index >> _secondary_array_index_bits;
int secondary_index = index & _secondary_array_index_mask;
objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
s = secondary->obj_at(secondary_index);
}
oop s = HeapShared::get_root((int)index, false);
assert(java_lang_String::is_instance(s), "must be");
return s;
}
@ -115,7 +100,6 @@ OopStorage* StringTable::_oop_storage;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);
volatile bool _alt_hash = false;
@ -317,12 +301,6 @@ void StringTable::create_table() {
_oop_storage->register_num_dead_callback(&gc_notification);
}
#if INCLUDE_CDS_JAVA_HEAP
void StringTable::load_shared_strings_array() {
_shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
}
#endif
void StringTable::item_added() {
AtomicAccess::inc(&_items_count);
}
@ -509,9 +487,6 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!AtomicAccess::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@ -957,118 +932,13 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}
// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
void StringTable::allocate_shared_strings_array(TRAPS) {
if (!CDSConfig::is_dumping_heap()) {
return;
}
void StringTable::init_shared_table() {
assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
precond(CDSConfig::is_dumping_heap());
assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
CompileBroker::wait_for_no_active_tasks();
precond(CDSConfig::allow_only_single_java_thread());
// At this point, no more strings will be added:
// - There's only a single Java thread (this thread). It no longer executes Java bytecodes
// so JIT compilation will eventually stop.
// - CompileBroker has no more active tasks, so all JIT requests have been processed.
// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, true));
if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
}
int total = (int)items_count_acquire();
size_t single_array_size = objArrayOopDesc::object_size(total);
log_info(aot)("allocated string table for %d strings", total);
if (!HeapShared::is_too_large_to_archive(single_array_size)) {
// The entire table can fit in a single array
objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), total, CHECK);
_shared_strings_array = OopHandle(Universe::vm_global(), array);
log_info(aot)("string table array (single level) length = %d", total);
} else {
// Split the table in two levels of arrays.
int primary_array_length = (total + _secondary_array_max_length - 1) / _secondary_array_max_length;
size_t primary_array_size = objArrayOopDesc::object_size(primary_array_length);
size_t secondary_array_size = objArrayOopDesc::object_size(_secondary_array_max_length);
if (HeapShared::is_too_large_to_archive(secondary_array_size)) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = 268M interned strings! Not a practical concern
// but bail out for safety.
log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
AOTMetaspace::unrecoverable_writing_error();
}
objArrayOop primary = oopFactory::new_objArray(vmClasses::Object_klass(), primary_array_length, CHECK);
objArrayHandle primaryHandle(THREAD, primary);
_shared_strings_array = OopHandle(Universe::vm_global(), primary);
log_info(aot)("string table array (primary) length = %d", primary_array_length);
for (int i = 0; i < primary_array_length; i++) {
int len;
if (total > _secondary_array_max_length) {
len = _secondary_array_max_length;
} else {
len = total;
}
total -= len;
objArrayOop secondary = oopFactory::new_objArray(vmClasses::Object_klass(), len, CHECK);
primaryHandle()->obj_at_put(i, secondary);
log_info(aot)("string table array (secondary)[%d] length = %d", i, len);
assert(!HeapShared::is_too_large_to_archive(secondary), "sanity");
}
assert(total == 0, "must be");
_is_two_dimensional_shared_strings_array = true;
}
}
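For reference, the primary/secondary encoding that the two-level layout relies on is easy to show in isolation. A minimal sketch, assuming the same 14-bit secondary width as _secondary_array_index_bits; the constant and function names here are illustrative, not HotSpot's.

#include <cassert>

const int kSecondaryBits = 14;                   // mirrors _secondary_array_index_bits
const int kSecondaryMax  = 1 << kSecondaryBits;  // max secondary array length (16384)
const int kSecondaryMask = kSecondaryMax - 1;

// Split a flat string-table index into two-level array coordinates:
// bits 31..14 select the secondary array, bits 13..0 index into it.
inline void split_index(int index, int& primary, int& secondary) {
  primary   = index >> kSecondaryBits;
  secondary = index & kSecondaryMask;
}

int main() {
  int p, s;
  split_index((3 << kSecondaryBits) | 42, p, s);
  assert(p == 3 && s == 42);  // element 42 of secondary array 3
  return 0;
}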
#ifndef PRODUCT
void StringTable::verify_secondary_array_index_bits() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
int max;
for (max = 1; ; max++) {
size_t next_size = objArrayOopDesc::object_size(1 << (max + 1));
if (HeapShared::is_too_large_to_archive(next_size)) {
break;
}
}
// Currently max is 17 for +UseCompressedOops, 16 for -UseCompressedOops.
// When we add support for Shenandoah (which has a smaller minimum region size than G1),
// max will become 15/14.
//
// We use _secondary_array_index_bits==14 as that will be the eventual value, and will
// make testing easier.
assert(_secondary_array_index_bits <= max,
"_secondary_array_index_bits (%d) must be smaller than max possible value (%d)",
_secondary_array_index_bits, max);
}
#endif // PRODUCT
// This is called AFTER we enter the CDS safepoint.
//
// For each shared string:
// [1] Store it into _shared_strings_array. Encode its position as a 32-bit index.
// [2] Store the index and hashcode into _shared_table.
oop StringTable::init_shared_strings_array() {
assert(CDSConfig::is_dumping_heap(), "must be");
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
verify_secondary_array_index_bits();
int index = 0;
auto copy_into_array = [&] (WeakHandle* val) {
int n = 0;
auto copy_into_aot_heap = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
// If string is too large, don't put it into the string table.
@ -1077,53 +947,34 @@ oop StringTable::init_shared_strings_array() {
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
if (!_is_two_dimensional_shared_strings_array) {
assert(index < array->length(), "no strings should have been added");
array->obj_at_put(index, string);
} else {
int primary_index = index >> _secondary_array_index_bits;
int secondary_index = index & _secondary_array_index_mask;
assert(primary_index < array->length(), "no strings should have been added");
objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
assert(secondary != nullptr && secondary->is_objArray(), "must be");
assert(secondary_index < secondary->length(), "no strings should have been added");
secondary->obj_at_put(secondary_index, string);
}
index ++;
}
n++;
return true;
};
_local_table->do_safepoint_scan(copy_into_array);
log_info(aot)("Archived %d interned strings", index);
return array;
_local_table->do_safepoint_scan(copy_into_aot_heap);
log_info(aot)("Archived %d interned strings", n);
}
void StringTable::write_shared_table() {
assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
precond(CDSConfig::is_dumping_heap());
assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
_shared_table.reset();
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
unsigned int hash = java_lang_String::hash_code(string);
writer.add(hash, index);
index ++;
int root_id = HeapShared::append_root(string);
writer.add(hash, root_id);
}
return true;
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, false));
}
void StringTable::set_shared_strings_array_index(int root_index) {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
_shared_strings_array_root_index = root_index;
}
void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
@ -1135,8 +986,27 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
} else if (!AOTMappedHeapLoader::is_in_use()) {
_shared_table.reset();
}
}
soc->do_bool(&_is_two_dimensional_shared_strings_array);
soc->do_int(&_shared_strings_array_root_index);
void StringTable::move_shared_strings_into_runtime_table() {
precond(CDSConfig::is_dumping_final_static_archive());
JavaThread* THREAD = JavaThread::current();
HandleMark hm(THREAD);
int n = 0;
_shared_table.iterate_all([&](oop string) {
int length = java_lang_String::length(string);
Handle h_string (THREAD, string);
StringWrapper name(h_string, length);
unsigned int hash = hash_wrapped_string(name);
assert(!_alt_hash, "too early");
oop interned = do_intern(name, hash, THREAD);
assert(string == interned, "must be");
n++;
});
_shared_table.reset();
log_info(aot)("Moved %d interned strings to runtime table", n);
}
#endif //INCLUDE_CDS_JAVA_HEAP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,49 +109,17 @@ public:
static bool needs_rehashing() { return _needs_rehashing; }
static inline void update_needs_rehash(bool rehash);
// Sharing
#if INCLUDE_CDS_JAVA_HEAP
static inline oop read_string_from_compact_hashtable(address base_address, u4 index);
// AOT support
static inline oop read_string_from_compact_hashtable(address base_address, u4 index) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
private:
static bool _is_two_dimensional_shared_strings_array;
static OopHandle _shared_strings_array;
static int _shared_strings_array_root_index;
// All the shared strings are referenced through _shared_strings_array to keep them alive.
// Each shared string is stored as a 32-bit index in ::_shared_table. The index
// is interpreted in two ways:
//
// [1] _is_two_dimensional_shared_strings_array = false: _shared_strings_array is an Object[].
// Each shared string is stored as _shared_strings_array[index]
//
// [2] _is_two_dimensional_shared_strings_array = true: _shared_strings_array is an Object[][]
// This happens when there are too many elements in the shared table. We store them
// using two levels of objArrays, such that none of the arrays are too big for
// AOTMappedHeapWriter::is_too_large_to_archive(). In this case, the index is split into two
// parts. Each shared string is stored as _shared_strings_array[primary_index][secondary_index]:
//
// [bits 31 .. 14][ bits 13 .. 0 ]
// primary_index secondary_index
const static int _secondary_array_index_bits = 14;
const static int _secondary_array_max_length = 1 << _secondary_array_index_bits;
const static int _secondary_array_index_mask = _secondary_array_max_length - 1;
// make sure _secondary_array_index_bits is not too big
static void verify_secondary_array_index_bits() PRODUCT_RETURN;
#endif // INCLUDE_CDS_JAVA_HEAP
private:
static oop lookup_shared(const StringWrapper& name, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
public:
static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void load_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN;
static oop init_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void init_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void write_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
static void move_shared_strings_into_runtime_table();
// Jcmd
static void dump(outputStream* st, bool verbose=false);

View File

@ -190,9 +190,8 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
// effect (sic!) for external_name(), but instead of doing that, we opt to
// explicitly push the hashcode in here. This is to signify that the following
// block is IMPORTANT:
if (klass->java_mirror() != nullptr) {
klass->java_mirror()->identity_hash();
}
assert(klass->java_mirror() != nullptr, "must be");
klass->java_mirror()->identity_hash();
if (!is_eligible_for_verification(klass, should_verify_class)) {
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -415,18 +415,18 @@ class methodHandle;
\
do_class(java_lang_StringCoding, "java/lang/StringCoding") \
do_intrinsic(_countPositives, java_lang_StringCoding, countPositives_name, countPositives_signature, F_S) \
do_name( countPositives_name, "countPositives") \
do_name( countPositives_name, "countPositives0") \
do_signature(countPositives_signature, "([BII)I") \
\
do_class(sun_nio_cs_iso8859_1_Encoder, "sun/nio/cs/ISO_8859_1$Encoder") \
do_intrinsic(_encodeISOArray, sun_nio_cs_iso8859_1_Encoder, encodeISOArray_name, encodeISOArray_signature, F_S) \
do_name( encodeISOArray_name, "implEncodeISOArray") \
do_name( encodeISOArray_name, "encodeISOArray0") \
do_signature(encodeISOArray_signature, "([CI[BII)I") \
\
do_intrinsic(_encodeByteISOArray, java_lang_StringCoding, encodeISOArray_name, indexOfI_signature, F_S) \
\
do_intrinsic(_encodeAsciiArray, java_lang_StringCoding, encodeAsciiArray_name, encodeISOArray_signature, F_S) \
do_name( encodeAsciiArray_name, "implEncodeAsciiArray") \
do_name( encodeAsciiArray_name, "encodeAsciiArray0") \
\
do_class(java_math_BigInteger, "java/math/BigInteger") \
do_intrinsic(_multiplyToLen, java_math_BigInteger, multiplyToLen_name, multiplyToLen_signature, F_S) \
@ -1003,7 +1003,7 @@ class methodHandle;
do_signature(vector_unary_op_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
@ -1015,7 +1015,7 @@ class methodHandle;
do_signature(vector_binary_op_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$VectorPayload;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorPayload;" \
@ -1051,7 +1051,7 @@ class methodHandle;
do_signature(vector_ternary_op_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
@ -1063,7 +1063,7 @@ class methodHandle;
\
do_intrinsic(_VectorSelectFromTwoVectorOp, jdk_internal_vm_vector_VectorSupport, vector_select_from_op_name, vector_select_from_op_sig, F_S) \
do_signature(vector_select_from_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
@ -1074,7 +1074,7 @@ class methodHandle;
\
do_intrinsic(_VectorFromBitsCoerced, jdk_internal_vm_vector_VectorSupport, vector_frombits_coerced_name, vector_frombits_coerced_sig, F_S) \
do_signature(vector_frombits_coerced_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"J" \
"I" \
@ -1085,7 +1085,7 @@ class methodHandle;
\
do_intrinsic(_VectorLoadOp, jdk_internal_vm_vector_VectorSupport, vector_load_op_name, vector_load_op_sig, F_S) \
do_signature(vector_load_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Object;" \
"J" \
@ -1100,7 +1100,7 @@ class methodHandle;
do_intrinsic(_VectorLoadMaskedOp, jdk_internal_vm_vector_VectorSupport, vector_load_masked_op_name, vector_load_masked_op_sig, F_S) \
do_signature(vector_load_masked_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Object;" \
"J" \
@ -1116,7 +1116,7 @@ class methodHandle;
\
do_intrinsic(_VectorStoreOp, jdk_internal_vm_vector_VectorSupport, vector_store_op_name, vector_store_op_sig, F_S) \
do_signature(vector_store_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Object;" \
"J" \
@ -1131,7 +1131,7 @@ class methodHandle;
do_intrinsic(_VectorStoreMaskedOp, jdk_internal_vm_vector_VectorSupport, vector_store_masked_op_name, vector_store_masked_op_sig, F_S) \
do_signature(vector_store_masked_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Object;" \
"J" \
@ -1148,7 +1148,7 @@ class methodHandle;
do_signature(vector_reduction_coerced_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
@ -1159,7 +1159,7 @@ class methodHandle;
do_intrinsic(_VectorTest, jdk_internal_vm_vector_VectorSupport, vector_test_name, vector_test_sig, F_S) \
do_signature(vector_test_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
@ -1170,7 +1170,7 @@ class methodHandle;
do_intrinsic(_VectorBlend, jdk_internal_vm_vector_VectorSupport, vector_blend_name, vector_blend_sig, F_S) \
do_signature(vector_blend_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
@ -1182,7 +1182,8 @@ class methodHandle;
do_intrinsic(_VectorCompare, jdk_internal_vm_vector_VectorSupport, vector_compare_name, vector_compare_sig, F_S) \
do_signature(vector_compare_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
@ -1195,7 +1196,7 @@ class methodHandle;
do_signature(vector_rearrange_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorShuffle;" \
@ -1207,18 +1208,18 @@ class methodHandle;
do_intrinsic(_VectorSelectFrom, jdk_internal_vm_vector_VectorSupport, vector_select_from_name, vector_select_from_sig, F_S) \
do_signature(vector_select_from_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorSelectFromOp;)" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;") \
do_name(vector_select_from_name, "selectFromOp") \
\
do_intrinsic(_VectorExtract, jdk_internal_vm_vector_VectorSupport, vector_extract_name, vector_extract_sig, F_S) \
do_signature(vector_extract_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$VectorPayload;" \
"I" \
@ -1228,7 +1229,7 @@ class methodHandle;
\
do_intrinsic(_VectorInsert, jdk_internal_vm_vector_VectorSupport, vector_insert_name, vector_insert_sig, F_S) \
do_signature(vector_insert_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"IJ" \
@ -1240,7 +1241,7 @@ class methodHandle;
do_signature(vector_broadcast_int_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"I" \
@ -1252,10 +1253,10 @@ class methodHandle;
do_intrinsic(_VectorConvert, jdk_internal_vm_vector_VectorSupport, vector_convert_name, vector_convert_sig, F_S) \
do_signature(vector_convert_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$VectorPayload;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorSpecies;" \
@ -1266,7 +1267,7 @@ class methodHandle;
do_intrinsic(_VectorGatherOp, jdk_internal_vm_vector_VectorSupport, vector_gather_name, vector_gather_sig, F_S) \
do_signature(vector_gather_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Class;" \
"I" \
@ -1287,7 +1288,7 @@ class methodHandle;
do_intrinsic(_VectorScatterOp, jdk_internal_vm_vector_VectorSupport, vector_scatter_name, vector_scatter_sig, F_S) \
do_signature(vector_scatter_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/Class;" \
"I" \
@ -1309,7 +1310,7 @@ class methodHandle;
do_intrinsic(_VectorMaskOp, jdk_internal_vm_vector_VectorSupport, vector_mask_oper_name, vector_mask_oper_sig, F_S) \
do_signature(vector_mask_oper_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMaskOp;)" \
@ -1320,7 +1321,7 @@ class methodHandle;
do_signature(vector_compress_expand_op_sig, "(I" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorMask;" \
@ -1330,7 +1331,7 @@ class methodHandle;
\
do_intrinsic(_IndexVector, jdk_internal_vm_vector_VectorSupport, index_vector_op_name, index_vector_op_sig, F_S) \
do_signature(index_vector_op_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
"I" \
@ -1341,7 +1342,7 @@ class methodHandle;
\
do_intrinsic(_IndexPartiallyInUpperRange, jdk_internal_vm_vector_VectorSupport, index_partially_in_upper_range_name, index_partially_in_upper_range_sig, F_S)\
do_signature(index_partially_in_upper_range_sig, "(Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"J" \
"J" \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -748,8 +748,6 @@ class SerializeClosure;
template(jdk_internal_vm_ThreadLock, "jdk/internal/vm/ThreadSnapshot$ThreadLock") \
template(jdk_internal_vm_ThreadLock_signature, "Ljdk/internal/vm/ThreadSnapshot$ThreadLock;") \
template(jdk_internal_vm_ThreadLock_array, "[Ljdk/internal/vm/ThreadSnapshot$ThreadLock;") \
template(java_lang_StackTraceElement_of_name, "of") \
template(java_lang_StackTraceElement_of_signature, "([Ljava/lang/StackTraceElement;)[Ljava/lang/StackTraceElement;") \
\
/* jcmd Thread.vthread_scheduler and Thread.vthread_pollers */ \
template(jdk_internal_vm_JcmdVThreadCommands, "jdk/internal/vm/JcmdVThreadCommands") \

View File

@ -520,6 +520,8 @@ VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
// eventually.
return nullptr;
}
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
blob = new (size) VtableBlob(name, size);
CodeCache_lock->unlock();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -299,13 +299,61 @@ void CodeCache::initialize_heaps() {
set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size + hot.size, non_nmethod_min_size);
}
size_t total = non_nmethod.size + profiled.size + non_profiled.size + hot.size;
if (total != cache_size && !cache_size_set) {
log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
"%zuK NonProfiled %zuK Profiled %zuK Hot %zuK = %zuK",
cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, hot.size/K, total/K);
// Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
cache_size = total;
// Note: if large page support is enabled, min_size is at least the large
// page size. This ensures that the code cache is covered by large pages.
non_nmethod.size = align_up(non_nmethod.size, min_size);
profiled.size = align_up(profiled.size, min_size);
non_profiled.size = align_up(non_profiled.size, min_size);
size_t aligned_total = non_nmethod.size + profiled.size + non_profiled.size;
if (!cache_size_set) {
// If ReservedCodeCacheSize is explicitly set and exceeds CODE_CACHE_SIZE_LIMIT,
// it is rejected by flag validation elsewhere. Here we only handle the case
// where ReservedCodeCacheSize is not set explicitly, but the computed segmented
// sizes (after alignment) exceed the platform limit.
if (aligned_total > CODE_CACHE_SIZE_LIMIT) {
err_msg message("ReservedCodeCacheSize (%zuK), Max (%zuK). "
"Segments: NonNMethod (%zuK), NonProfiled (%zuK), Profiled (%zuK).",
aligned_total/K, CODE_CACHE_SIZE_LIMIT/K,
non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
vm_exit_during_initialization("Code cache size exceeds platform limit", message);
}
if (aligned_total != cache_size) {
log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
"%zuK NonProfiled %zuK Profiled %zuK = %zuK",
cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, aligned_total/K);
// Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
cache_size = aligned_total;
}
} else {
check_min_size("reserved code cache", cache_size, min_cache_size);
// ReservedCodeCacheSize was set explicitly, so treat it as a hard cap.
// If alignment causes the total to exceed the cap, shrink unset heaps
// in min_size steps, never below their minimum sizes.
//
// A total smaller than cache_size typically happens when all segment sizes
// are explicitly set. In that case there is nothing to adjust, so we
// only validate the sizes.
if (aligned_total > cache_size) {
size_t delta = (aligned_total - cache_size) / min_size;
while (delta > 0) {
size_t start_delta = delta;
// Do not shrink the non-nmethod heap here: running out of non-nmethod space
// is more critical and may lead to unrecoverable VM errors.
if (non_profiled.enabled && !non_profiled.set && non_profiled.size > min_size) {
non_profiled.size -= min_size;
if (--delta == 0) break;
}
if (profiled.enabled && !profiled.set && profiled.size > min_size) {
profiled.size -= min_size;
delta--;
}
if (delta == start_delta) {
break;
}
}
aligned_total = non_nmethod.size + profiled.size + non_profiled.size;
}
}
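The cap-enforcement loop above trims the overshoot in round-robin min_size steps so no single heap absorbs the whole delta, and gives up once neither heap can shrink further. A simplified standalone sketch of that structure follows; the Heap struct and function name are assumptions for illustration, not the CodeCache types.

#include <cstddef>

struct Heap {
  size_t size;
  bool   enabled;
  bool   set;  // true if the user fixed this heap's size explicitly
};

// Recover 'delta' steps of min_size from the adjustable heaps, never
// shrinking below min_size and never touching explicitly-set heaps.
void shrink_to_cap(Heap& non_profiled, Heap& profiled,
                   size_t min_size, size_t delta) {
  while (delta > 0) {
    size_t start_delta = delta;
    if (non_profiled.enabled && !non_profiled.set && non_profiled.size > min_size) {
      non_profiled.size -= min_size;
      if (--delta == 0) break;
    }
    if (profiled.enabled && !profiled.set && profiled.size > min_size) {
      profiled.size -= min_size;
      delta--;
    }
    if (delta == start_delta) break;  // nothing shrinkable left; stop trying
  }
}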
log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
@ -324,12 +372,9 @@ void CodeCache::initialize_heaps() {
if (hot.enabled) {
check_min_size("hot code heap", hot.size, min_size);
}
if (cache_size_set) {
check_min_size("reserved code cache", cache_size, min_cache_size);
}
// ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
if (total != cache_size && cache_size_set) {
if (aligned_total != cache_size && cache_size_set) {
err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
if (profiled.enabled) {
message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
@ -340,8 +385,8 @@ void CodeCache::initialize_heaps() {
if (hot.enabled) {
message.append(" + HotCodeHeapSize (%zuK)", hot.size/K);
}
message.append(" = %zuK", total/K);
message.append((total > cache_size) ? " is greater than " : " is less than ");
message.append(" = %zuK", aligned_total/K);
message.append((aligned_total > cache_size) ? " is greater than " : " is less than ");
message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
vm_exit_during_initialization("Invalid code heap sizes", message);
@ -357,19 +402,6 @@ void CodeCache::initialize_heaps() {
}
}
// Note: if large page support is enabled, min_size is at least the large
// page size. This ensures that the code cache is covered by large pages.
non_nmethod.size = align_up(non_nmethod.size, min_size);
profiled.size = align_up(profiled.size, min_size);
non_profiled.size = align_up(non_profiled.size, min_size);
if (hot.enabled) {
hot.size = align_up(hot.size, min_size);
FLAG_SET_ERGO(HotCodeHeapSize, hot.size);
}
cache_size = non_nmethod.size + profiled.size + non_profiled.size + hot.size;
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);

View File

@ -2145,6 +2145,9 @@ void nmethod::make_deoptimized() {
ResourceMark rm;
RelocIterator iter(this, oops_reloc_begin());
// Assume there will be some calls to make deoptimized.
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
while (iter.next()) {
switch (iter.type()) {
@ -2221,6 +2224,7 @@ void nmethod::verify_clean_inline_caches() {
}
void nmethod::mark_as_maybe_on_stack() {
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
}
@ -2313,6 +2317,8 @@ bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
return false;
}
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
{
// Enter critical section. Does not block for safepoint.
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
@ -2753,6 +2759,8 @@ bool nmethod::is_unloading() {
state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// Note that if an nmethod has dead oops, everyone will agree that the
// nmethod is_unloading. However, the is_cold heuristics can yield
// different outcomes, so we guard the computed result with a CAS

View File

@ -51,6 +51,9 @@ VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert_lock_strong(VtableStubs_lock);
assert(size == sizeof(VtableStub), "mismatched size");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
// compute real VtableStub size (rounded to nearest word)
const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2346,12 +2346,18 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
/* Repeat compilation without installing code for profiling purposes */
int repeat_compilation_count = directive->RepeatCompilationOption;
while (repeat_compilation_count > 0) {
ResourceMark rm(thread);
task->print_ul("NO CODE INSTALLED");
thread->timeout()->reset();
comp->compile_method(&ci_env, target, osr_bci, false, directive);
repeat_compilation_count--;
if (repeat_compilation_count > 0) {
CHeapStringHolder failure_reason;
failure_reason.set(ci_env._failure_reason.get());
while (repeat_compilation_count > 0) {
ResourceMark rm(thread);
task->print_ul("NO CODE INSTALLED");
thread->timeout()->reset();
ci_env._failure_reason.clear();
comp->compile_method(&ci_env, target, osr_bci, false, directive);
repeat_compilation_count--;
}
ci_env._failure_reason.set(failure_reason.get());
}
}
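The change above keeps the real compilation's failure reason intact across the profiling-only repeats: save it first, clear it before each repeat so a stale reason is not misread as the repeat's outcome, and restore it at the end. A generic save/clear/restore sketch of the same idea, with illustrative names rather than the CompileBroker types:

#include <string>

struct Env { std::string failure_reason; };

// Run extra compilations purely for profiling while preserving the
// failure reason recorded by the first, real compilation.
void repeat_for_profiling(Env& env, int repeats, void (*compile_once)(Env&)) {
  if (repeats > 0) {
    std::string saved = env.failure_reason;  // keep the original outcome
    while (repeats-- > 0) {
      env.failure_reason.clear();            // don't carry a stale reason over
      compile_once(env);
    }
    env.failure_reason = saved;              // restore the real result
  }
}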

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -351,7 +351,6 @@ Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) co
Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}
@ -361,7 +360,6 @@ Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access
Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
@ -370,7 +368,6 @@ Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& acces
}
Node* G1BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,13 +64,13 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table,
{}
G1BarrierSet::~G1BarrierSet() {
delete _refinement_table;
delete refinement_table();
}
void G1BarrierSet::swap_global_card_table() {
G1CardTable* temp = static_cast<G1CardTable*>(_card_table);
_card_table = _refinement_table;
_refinement_table = temp;
G1CardTable* temp = static_cast<G1CardTable*>(card_table());
_card_table.store_relaxed(refinement_table());
_refinement_table.store_relaxed(temp);
}
void G1BarrierSet::update_card_table_base(Thread* thread) {
@ -80,7 +80,7 @@ void G1BarrierSet::update_card_table_base(Thread* thread) {
assert(thread->is_Java_thread(), "may only update card table base of JavaThreads, not %s", thread->name());
}
#endif
G1ThreadLocalData::set_byte_map_base(thread, _card_table->byte_map_base());
G1ThreadLocalData::set_byte_map_base(thread, card_table()->byte_map_base());
}
template <class T> void
@ -135,10 +135,10 @@ void G1BarrierSet::write_region(MemRegion mr) {
// marks next time.
// If we write to the old card table (after the switching, then the refinement
// table) the oncoming handshake will do the memory synchronization.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardTable* local_card_table = card_table();
volatile CardValue* byte = card_table->byte_for(mr.start());
CardValue* last_byte = card_table->byte_for(mr.last());
volatile CardValue* byte = local_card_table->byte_for(mr.start());
CardValue* last_byte = local_card_table->byte_for(mr.last());
// Dirty cards only if necessary.
for (; byte <= last_byte; byte++) {
@ -190,6 +190,6 @@ void G1BarrierSet::on_thread_detach(Thread* thread) {
}
void G1BarrierSet::print_on(outputStream* st) const {
_card_table->print_on(st, "Card");
_refinement_table->print_on(st, "Refinement");
card_table()->print_on(st, "Card");
refinement_table()->print_on(st, "Refinement");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/atomic.hpp"
class G1CardTable;
class Thread;
@ -66,7 +67,7 @@ class G1BarrierSet: public CardTableBarrierSet {
BufferNode::Allocator _satb_mark_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
G1CardTable* _refinement_table;
Atomic<G1CardTable*> _refinement_table;
public:
G1BarrierSet(G1CardTable* card_table, G1CardTable* refinement_table);
@ -76,7 +77,7 @@ class G1BarrierSet: public CardTableBarrierSet {
return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
}
G1CardTable* refinement_table() const { return _refinement_table; }
G1CardTable* refinement_table() const { return _refinement_table.load_relaxed(); }
// Swap the global card table references, without synchronization.
void swap_global_card_table();
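// For illustration: the change above is one instance of a pattern repeated
// throughout this merge, where a raw "volatile" field accessed through
// AtomicAccess:: free functions becomes an Atomic<T> member with explicit
// load_relaxed()/store_relaxed() accessors. A minimal sketch of the same
// discipline, using std::atomic as a stand-in for HotSpot's Atomic<T>;
// the class and field types below are hypothetical.

#include <atomic>

class CardTablePair {
  std::atomic<int*> _card_table{nullptr};
  std::atomic<int*> _refinement_table{nullptr};

public:
  int* card_table() const { return _card_table.load(std::memory_order_relaxed); }
  int* refinement_table() const { return _refinement_table.load(std::memory_order_relaxed); }

  // Mirrors swap_global_card_table(): two relaxed stores, no fence; callers
  // are expected to provide synchronization (in G1, a later handshake).
  void swap() {
    int* temp = card_table();
    _card_table.store(refinement_table(), std::memory_order_relaxed);
    _refinement_table.store(temp, std::memory_order_relaxed);
  }
};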


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,8 +73,8 @@ inline void G1BarrierSet::write_ref_field_post(T* field) {
// Make sure that the card table reference is read only once. Otherwise the compiler
// might reload that value in the two accesses below, which could cause writes to
// the wrong card table.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardValue* byte = card_table->byte_for(field);
CardTable* local_card_table = card_table();
CardValue* byte = local_card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
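// For illustration: the comment above is why the new code funnels the field
// through a single local. With two separate reads of _card_table, the
// compiler may legally reload the pointer between byte_for() and the
// dirty-card store, so after a concurrent table swap the two uses could hit
// different tables. A self-contained sketch with std::atomic standing in for
// the Atomic<G1CardTable*> field; all names here are hypothetical.

#include <atomic>
#include <cstdint>

using CardValue = unsigned char;

struct CardTable {
  CardValue* _base;
  CardValue* byte_for(const void* p) {
    return _base + (reinterpret_cast<uintptr_t>(p) >> 9);  // 512-byte cards
  }
};

std::atomic<CardTable*> g_card_table;  // swapped concurrently by the GC

void post_write_barrier(const void* field) {
  // Exactly one shared load; both uses below go through the same table.
  CardTable* local_card_table = g_card_table.load(std::memory_order_relaxed);
  CardValue* byte = local_card_table->byte_for(field);
  if (*byte == 0 /* clean */) {
    *byte = 1 /* dirty */;
  }
}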


@ -1320,7 +1320,6 @@ G1CollectedHeap::G1CollectedHeap() :
_card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
_young_regions_cset_group(card_set_config(), &_card_set_freelist_pool, G1CSetCandidateGroup::YoungRegionId),
_cm(nullptr),
_cm_thread(nullptr),
_cr(nullptr),
_task_queues(nullptr),
_partial_array_state_manager(nullptr),
@ -1564,7 +1563,6 @@ jint G1CollectedHeap::initialize() {
// Create the G1ConcurrentMark data structure and thread.
// (Must do this late, so that "max_[reserved_]regions" is defined.)
_cm = new G1ConcurrentMark(this, bitmap_storage);
_cm_thread = _cm->cm_thread();
// Now expand into the initial heap size.
if (!expand(init_byte_size, _workers)) {
@ -1636,7 +1634,9 @@ jint G1CollectedHeap::initialize() {
}
bool G1CollectedHeap::concurrent_mark_is_terminating() const {
return _cm_thread->should_terminate();
assert(_cm != nullptr, "_cm must have been created");
assert(_cm->is_fully_initialized(), "thread must exist in order to check if mark is terminating");
return _cm->cm_thread()->should_terminate();
}
void G1CollectedHeap::stop() {
@ -1645,7 +1645,9 @@ void G1CollectedHeap::stop() {
// that are destroyed during shutdown.
_cr->stop();
_service_thread->stop();
_cm_thread->stop();
if (_cm->is_fully_initialized()) {
_cm->cm_thread()->stop();
}
}
void G1CollectedHeap::safepoint_synchronize_begin() {
@ -1842,7 +1844,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
// is set) so that if a waiter requests another System.gc() it doesn't
// incorrectly see that a marking cycle is still in progress.
if (concurrent) {
_cm_thread->set_idle();
_cm->cm_thread()->set_idle();
}
// Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
@ -2421,7 +2423,6 @@ void G1CollectedHeap::print_gc_on(outputStream* st) const {
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
workers()->threads_do(tc);
tc->do_thread(_cm_thread);
_cm->threads_do(tc);
_cr->threads_do(tc);
tc->do_thread(_service_thread);
@ -2542,15 +2543,15 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
}
void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_mark) {
assert(!_cm_thread->in_progress(), "Can not start concurrent operation while in progress");
assert(_cm->is_fully_initialized(), "sanity");
assert(!_cm->in_progress(), "Can not start concurrent operation while in progress");
MutexLocker x(G1CGC_lock, Mutex::_no_safepoint_check_flag);
if (concurrent_operation_is_full_mark) {
_cm->post_concurrent_mark_start();
_cm_thread->start_full_mark();
_cm->cm_thread()->start_full_mark();
} else {
_cm->post_concurrent_undo_start();
_cm_thread->start_undo_mark();
_cm->cm_thread()->start_undo_mark();
}
G1CGC_lock->notify();
}
@ -2726,6 +2727,8 @@ void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_si
_bytes_used_during_gc = 0;
_cm->fully_initialize();
policy()->decide_on_concurrent_start_pause();
// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already


@ -823,7 +823,6 @@ public:
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
G1ConcurrentMarkThread* _cm_thread;
// The concurrent refiner.
G1ConcurrentRefine* _cr;


@ -382,12 +382,12 @@ G1CMRootMemRegions::~G1CMRootMemRegions() {
}
void G1CMRootMemRegions::reset() {
_num_root_regions = 0;
_num_root_regions.store_relaxed(0);
}
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u);
size_t idx = _num_root_regions.fetch_then_add(1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions);
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
@ -398,36 +398,38 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
void G1CMRootMemRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");
_scan_in_progress = _num_root_regions > 0;
_scan_in_progress.store_relaxed(num_root_regions() > 0);
_claimed_root_regions = 0;
_should_abort = false;
_claimed_root_regions.store_relaxed(0);
_should_abort.store_relaxed(false);
}
const MemRegion* G1CMRootMemRegions::claim_next() {
if (_should_abort) {
if (_should_abort.load_relaxed()) {
// If someone has set the should_abort flag, we return null to
// force the caller to bail out of their loop.
return nullptr;
}
if (_claimed_root_regions >= _num_root_regions) {
uint local_num_root_regions = num_root_regions();
if (_claimed_root_regions.load_relaxed() >= local_num_root_regions) {
return nullptr;
}
size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u);
if (claimed_index < _num_root_regions) {
size_t claimed_index = _claimed_root_regions.fetch_then_add(1u);
if (claimed_index < local_num_root_regions) {
return &_root_regions[claimed_index];
}
return nullptr;
}
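// For illustration: claim_next() above is the usual lock-free claiming idiom,
// now expressed on Atomic<size_t>: an optimistic bounds check, a
// fetch_then_add to take a unique index, then a re-check because several
// threads may pass the first test at once. A stand-alone sketch with
// std::atomic; the names are hypothetical.

#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<size_t> g_claimed{0};
std::atomic<bool>   g_abort{false};

// Returns an index in [0, limit) claimed by exactly one caller, or SIZE_MAX
// when aborted or exhausted.
size_t claim_next(size_t limit) {
  if (g_abort.load(std::memory_order_relaxed)) return SIZE_MAX;
  if (g_claimed.load(std::memory_order_relaxed) >= limit) return SIZE_MAX;
  size_t idx = g_claimed.fetch_add(1, std::memory_order_relaxed);
  return idx < limit ? idx : SIZE_MAX;  // losers of the race overshoot
}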
uint G1CMRootMemRegions::num_root_regions() const {
return (uint)_num_root_regions;
return (uint)_num_root_regions.load_relaxed();
}
bool G1CMRootMemRegions::contains(const MemRegion mr) const {
for (uint i = 0; i < _num_root_regions; i++) {
uint local_num_root_regions = num_root_regions();
for (uint i = 0; i < local_num_root_regions; i++) {
if (_root_regions[i].equals(mr)) {
return true;
}
@ -437,7 +439,7 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const {
void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress = false;
_scan_in_progress.store_relaxed(false);
G1RootRegionScan_lock->notify_all();
}
@ -448,10 +450,10 @@ void G1CMRootMemRegions::cancel_scan() {
void G1CMRootMemRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");
if (!_should_abort) {
assert(_claimed_root_regions >= num_root_regions(),
if (!_should_abort.load_relaxed()) {
assert(_claimed_root_regions.load_relaxed() >= num_root_regions(),
"we should have claimed all root regions, claimed %zu, length = %u",
_claimed_root_regions, num_root_regions());
_claimed_root_regions.load_relaxed(), num_root_regions());
}
notify_scan_done();
@ -473,7 +475,7 @@ bool G1CMRootMemRegions::wait_until_scan_finished() {
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* bitmap_storage) :
// _cm_thread set inside the constructor
_cm_thread(nullptr),
_g1h(g1h),
_mark_bitmap(),
@ -484,13 +486,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_global_mark_stack(),
// _finger set in set_non_marking_state
_finger(nullptr), // _finger set in set_non_marking_state
_worker_id_offset(G1ConcRefinementThreads), // The refinement control thread does not refine cards, so it's just the worker threads.
_max_num_tasks(MAX2(ConcGCThreads, ParallelGCThreads)),
// _num_active_tasks set in set_non_marking_state()
// _tasks set inside the constructor
_num_active_tasks(0), // _num_active_tasks set in set_non_marking_state()
_tasks(nullptr), // _tasks set inside fully_initialize()
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
_terminator(_max_num_tasks, _task_queues),
_partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
@ -525,6 +526,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
_mark_bitmap.initialize(g1h->reserved(), bitmap_storage);
}
void G1ConcurrentMark::fully_initialize() {
if (is_fully_initialized()) {
return;
}
// Create & start ConcurrentMark thread.
_cm_thread = new G1ConcurrentMarkThread(this);
@ -560,6 +567,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
reset_at_marking_complete();
}
bool G1ConcurrentMark::in_progress() const {
return is_fully_initialized() ? _cm_thread->in_progress() : false;
}
PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
return _partial_array_state_manager;
}
@ -628,8 +639,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
_finger = _heap.start();
for (uint i = 0; i < _max_num_tasks; ++i) {
G1CMTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
_tasks[i]->reset_for_restart();
}
}
@ -765,7 +775,7 @@ private:
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
assert(!suspendible() || _cm->in_progress(), "invariant");
assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
// Abort iteration if necessary.
@ -821,7 +831,8 @@ void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) {
void G1ConcurrentMark::cleanup_for_next_mark() {
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(is_fully_initialized(), "should be initialized");
guarantee(in_progress(), "invariant");
// We are finishing up the current cycle by clearing the next
// marking bitmap and getting it ready for the next cycle. During
@ -834,7 +845,8 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
reset_partial_array_state_manager();
// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(is_fully_initialized(), "should be initialized");
guarantee(in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
@ -1925,15 +1937,12 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
// nothing, but this situation should be extremely rare (a full gc after shutdown
// has been signalled is already rare), and this work should be negligible compared
// to actual full gc work.
if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) {
if (!is_fully_initialized() || (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating())) {
return false;
}
// Empty mark stack
reset_marking_for_restart();
for (uint i = 0; i < _max_num_tasks; ++i) {
_tasks[i]->clear_region_fields();
}
abort_marking_threads();
@ -1987,6 +1996,10 @@ void G1ConcurrentMark::print_summary_info() {
}
log.trace(" Concurrent marking:");
if (!is_fully_initialized()) {
log.trace(" has not been initialized yet");
return;
}
print_ms_time_info(" ", "remarks", _remark_times);
{
print_ms_time_info(" ", "final marks", _remark_mark_times);
@ -2003,7 +2016,10 @@ void G1ConcurrentMark::print_summary_info() {
}
void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
_concurrent_workers->threads_do(tc);
if (is_fully_initialized()) { // they are initialized late
tc->do_thread(_cm_thread);
_concurrent_workers->threads_do(tc);
}
}
void G1ConcurrentMark::print_on(outputStream* st) const {
@ -2097,6 +2113,13 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
_mark_stats_cache.reset();
}
void G1CMTask::reset_for_restart() {
clear_region_fields();
_task_queue->set_empty();
TASKQUEUE_STATS_ONLY(_partial_array_splitter.stats()->reset());
TASKQUEUE_STATS_ONLY(_task_queue->stats.reset());
}
void G1CMTask::register_partial_array_splitter() {
::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(),


@ -290,12 +290,12 @@ class G1CMRootMemRegions {
MemRegion* _root_regions;
size_t const _max_regions;
volatile size_t _num_root_regions; // Actual number of root regions.
Atomic<size_t> _num_root_regions; // Actual number of root regions.
volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
Atomic<size_t> _claimed_root_regions; // Number of root regions currently claimed.
volatile bool _scan_in_progress;
volatile bool _should_abort;
Atomic<bool> _scan_in_progress;
Atomic<bool> _should_abort;
void notify_scan_done();
@ -312,11 +312,11 @@ public:
void prepare_for_scan();
// Forces get_next() to return null so that the iteration aborts early.
void abort() { _should_abort = true; }
void abort() { _should_abort.store_relaxed(true); }
// Return true if the CM thread is actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }
bool scan_in_progress() { return _scan_in_progress.load_relaxed(); }
// Claim the next root MemRegion to scan atomically, or return null if
// all have been claimed.
@ -555,6 +555,9 @@ public:
uint worker_id_offset() const { return _worker_id_offset; }
void fully_initialize();
bool is_fully_initialized() const { return _cm_thread != nullptr; }
bool in_progress() const;
uint max_num_tasks() const { return _max_num_tasks; }
// Clear statistics gathered during the concurrent cycle for the given region after
@ -841,8 +844,10 @@ private:
// Apply the closure to the given range of elements in the objArray.
inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
public:
// Resets the task; should be called right at the beginning of a marking phase.
// Resets the task completely for a new marking; should be called right at the beginning of a marking phase.
void reset(G1CMBitMap* mark_bitmap);
// Minimal reset of the task, making it ready for continuing to mark.
void reset_for_restart();
// Register/unregister Partial Array Splitter Allocator with the PartialArrayStateManager.
// This allows us to discard memory arenas used for partial object array states at the end
// of a concurrent mark cycle.
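// For illustration: fully_initialize()/is_fully_initialized() above form the
// lazy-creation protocol that the call sites in g1CollectedHeap.cpp and
// g1ConcurrentMark.cpp now guard against: the marking thread is created only
// by the first pause that may need it, and every path touching it must
// tolerate the uninitialized state. A compact sketch of that protocol with
// hypothetical names.

struct MarkThread {
  bool in_progress() const { return false; }
};

class LazyMark {
  MarkThread* _thread = nullptr;  // created on demand, possibly never

public:
  bool is_fully_initialized() const { return _thread != nullptr; }

  void fully_initialize() {
    if (is_fully_initialized()) return;  // idempotent, safe to call per pause
    _thread = new MarkThread();
  }

  // Queries degrade gracefully instead of dereferencing a null thread,
  // as G1ConcurrentMark::in_progress() does earlier in this diff.
  bool in_progress() const {
    return is_fully_initialized() ? _thread->in_progress() : false;
  }
};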


@ -276,6 +276,21 @@ void G1FullCollector::before_marking_update_attribute_table(G1HeapRegion* hr) {
class G1FullGCRefProcProxyTask : public RefProcProxyTask {
G1FullCollector& _collector;
// G1 Full GC specific closure for handling discovered fields. It does NOT need any
// barriers as Full GC discards all this information anyway.
class G1FullGCDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
G1CollectedHeap* _g1h;
public:
G1FullGCDiscoveredFieldClosure() : _g1h(G1CollectedHeap::heap()) { }
void enqueue(HeapWord* discovered_field_addr, oop value) override {
assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
// Store the value and done.
RawAccess<>::oop_store(discovered_field_addr, value);
}
};
public:
G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
: RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
@ -286,7 +301,7 @@ public:
G1IsAliveClosure is_alive(&_collector);
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
G1FullKeepAliveClosure keep_alive(_collector.marker(index));
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1FullGCDiscoveredFieldClosure enqueue;
G1MarkStackClosure* complete_marking = _collector.marker(index)->stack_closure();
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_marking);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,14 +42,13 @@ G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support,
_rm(),
_should_clear_soft_refs(clear_soft),
_do_maximal_compaction(do_maximal_compaction),
_g1h(G1CollectedHeap::heap()),
_svc_marker(SvcGCMarker::FULL),
_timer(),
_tracer(tracer),
_active(),
_tracer_mark(&_timer, _tracer),
_monitoring_scope(monitoring_support),
_heap_printer(_g1h),
_heap_printer(G1CollectedHeap::heap()),
_region_compaction_threshold(do_maximal_compaction ?
G1HeapRegion::GrainWords :
(1 - MarkSweepDeadRatio / 100.0) * G1HeapRegion::GrainWords) { }


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,6 @@ class G1FullGCScope : public StackObj {
ResourceMark _rm;
bool _should_clear_soft_refs;
bool _do_maximal_compaction;
G1CollectedHeap* _g1h;
SvcGCMarker _svc_marker;
STWGCTimer _timer;
G1FullGCTracer* _tracer;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/powerOfTwo.hpp"
@ -131,8 +131,8 @@ void G1HeapRegion::hr_clear(bool clear_space) {
G1CollectedHeap::heap()->concurrent_mark()->reset_top_at_mark_start(this);
_parsable_bottom = bottom();
_garbage_bytes = 0;
_parsable_bottom.store_relaxed(bottom());
_garbage_bytes.store_relaxed(0);
_incoming_refs = 0;
if (clear_space) clear(SpaceDecorator::Mangle);
@ -294,12 +294,12 @@ void G1HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
// young gen regions never have their PB set to anything other than bottom.
assert(parsable_bottom_acquire() == bottom(), "must be");
_garbage_bytes = 0;
_garbage_bytes.store_relaxed(0);
_incoming_refs = 0;
}
void G1HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
AtomicAccess::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed);
_garbage_bytes.add_then_fetch(garbage_bytes, memory_order_relaxed);
}
// Code roots support
@ -448,7 +448,7 @@ void G1HeapRegion::print_on(outputStream* st) const {
st->print("|-");
}
}
st->print("|%3zu", AtomicAccess::load(&_pinned_object_count));
st->print("|%3zu", _pinned_object_count.load_relaxed());
st->print_cr("");
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "gc/shared/ageTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"
#include "utilities/macros.hpp"
@ -73,7 +74,7 @@ class G1HeapRegion : public CHeapObj<mtGC> {
HeapWord* const _bottom;
HeapWord* const _end;
HeapWord* volatile _top;
Atomic<HeapWord*> _top;
G1BlockOffsetTable* _bot;
@ -89,8 +90,8 @@ public:
HeapWord* bottom() const { return _bottom; }
HeapWord* end() const { return _end; }
void set_top(HeapWord* value) { _top = value; }
HeapWord* top() const { return _top; }
void set_top(HeapWord* value) { _top.store_relaxed(value); }
HeapWord* top() const { return _top.load_relaxed(); }
// See the comment above in the declaration of _pre_dummy_top for an
// explanation of what it is.
@ -231,10 +232,10 @@ private:
//
// Below this limit the marking bitmap must be used to determine size and
// liveness.
HeapWord* volatile _parsable_bottom;
Atomic<HeapWord*> _parsable_bottom;
// Amount of dead data in the region.
size_t _garbage_bytes;
Atomic<size_t> _garbage_bytes;
// Approximate number of references to this region at the end of concurrent
// marking. We do not mark through all objects, so this is an estimate.
@ -249,7 +250,7 @@ private:
uint _node_index;
// Number of objects in this region that are currently pinned.
volatile size_t _pinned_object_count;
Atomic<size_t> _pinned_object_count;
void report_region_type_change(G1HeapRegionTraceType::Type to);
@ -331,7 +332,7 @@ public:
}
// A lower bound on the amount of garbage bytes in the region.
size_t garbage_bytes() const { return _garbage_bytes; }
size_t garbage_bytes() const { return _garbage_bytes.load_relaxed(); }
// Return the amount of bytes we'll reclaim if we collect this
// region. This includes not only the known garbage bytes in the
@ -393,8 +394,8 @@ public:
bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
size_t pinned_count() const { return AtomicAccess::load(&_pinned_object_count); }
bool has_pinned_objects() const { return pinned_count() > 0; }
inline size_t pinned_count() const;
inline bool has_pinned_objects() const;
void set_free();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,6 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/init.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
@ -131,7 +130,7 @@ inline void G1HeapRegion::prepare_for_full_gc() {
// After marking and class unloading the heap temporarily contains dead objects
// with unloaded klasses. Moving parsable_bottom makes some (debug) code correctly
// skip dead objects.
_parsable_bottom = top();
_parsable_bottom.store_relaxed(top());
}
inline void G1HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
@ -154,7 +153,7 @@ inline void G1HeapRegion::reset_after_full_gc_common() {
// Everything above bottom() is parsable and live.
reset_parsable_bottom();
_garbage_bytes = 0;
_garbage_bytes.store_relaxed(0);
_incoming_refs = 0;
@ -188,20 +187,22 @@ inline void G1HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMar
inline HeapWord* G1HeapRegion::par_allocate(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
HeapWord* obj = top();
do {
HeapWord* obj = top();
size_t available = pointer_delta(end(), obj);
size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate;
HeapWord* result = AtomicAccess::cmpxchg(&_top, obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
HeapWord* result = _top.compare_exchange(obj, new_top);
// Result can be one of two:
// the old top value: the exchange succeeded, return.
// otherwise: the new value of the top is returned.
if (result == obj) {
assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
*actual_word_size = want_to_allocate;
return obj;
} else {
obj = result;
}
} else {
return nullptr;
@ -254,27 +255,27 @@ inline void G1HeapRegion::update_bot_for_block(HeapWord* start, HeapWord* end) {
inline HeapWord* G1HeapRegion::parsable_bottom() const {
assert(!is_init_completed() || SafepointSynchronize::is_at_safepoint(), "only during initialization or safepoint");
return _parsable_bottom;
return _parsable_bottom.load_relaxed();
}
inline HeapWord* G1HeapRegion::parsable_bottom_acquire() const {
return AtomicAccess::load_acquire(&_parsable_bottom);
return _parsable_bottom.load_acquire();
}
inline void G1HeapRegion::reset_parsable_bottom() {
AtomicAccess::release_store(&_parsable_bottom, bottom());
_parsable_bottom.release_store(bottom());
}
inline void G1HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes, size_t incoming_refs) {
assert_at_safepoint();
if (top_at_mark_start != bottom()) {
_garbage_bytes = byte_size(bottom(), top_at_mark_start) - marked_bytes;
_garbage_bytes.store_relaxed(byte_size(bottom(), top_at_mark_start) - marked_bytes);
_incoming_refs = incoming_refs;
}
if (needs_scrubbing()) {
_parsable_bottom = top_at_mark_start;
_parsable_bottom.store_relaxed(top_at_mark_start);
}
}
@ -286,6 +287,14 @@ inline bool G1HeapRegion::needs_scrubbing() const {
return is_old();
}
inline size_t G1HeapRegion::pinned_count() const {
return _pinned_object_count.load_relaxed();
}
inline bool G1HeapRegion::has_pinned_objects() const {
return pinned_count() > 0;
}
inline bool G1HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
@ -511,7 +520,7 @@ inline void G1HeapRegion::record_surv_words_in_group(size_t words_survived) {
inline void G1HeapRegion::add_pinned_object_count(size_t value) {
assert(value != 0, "wasted effort");
assert(!is_free(), "trying to pin free region %u, adding %zu", hrm_index(), value);
AtomicAccess::add(&_pinned_object_count, value, memory_order_relaxed);
_pinned_object_count.add_then_fetch(value, memory_order_relaxed);
}
inline void G1HeapRegion::install_cset_group(G1CSetCandidateGroup* cset_group) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -713,8 +713,10 @@ void G1HeapRegionManager::verify_optional() {
G1HeapRegionClaimer::G1HeapRegionClaimer(uint n_workers) :
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._next_highest_used_hrm_index), _claims(nullptr) {
uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
Atomic<uint>* new_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, _n_regions, mtGC);
for (uint i = 0; i < _n_regions; i++) {
new_claims[i].store_relaxed(Unclaimed);
}
_claims = new_claims;
}
@ -730,13 +732,12 @@ uint G1HeapRegionClaimer::offset_for_worker(uint worker_id) const {
bool G1HeapRegionClaimer::is_region_claimed(uint region_index) const {
assert(region_index < _n_regions, "Invalid index.");
return _claims[region_index] == Claimed;
return _claims[region_index].load_relaxed() == Claimed;
}
bool G1HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
uint old_val = AtomicAccess::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
return _claims[region_index].compare_set(Unclaimed, Claimed);
}
class G1RebuildFreeListTask : public WorkerTask {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/memoryUsage.hpp"
class G1HeapRegion;
@ -294,7 +295,7 @@ public:
class G1HeapRegionClaimer : public StackObj {
uint _n_workers;
uint _n_regions;
volatile uint* _claims;
Atomic<uint>* _claims;
static const uint Unclaimed = 0;
static const uint Claimed = 1;


@ -39,7 +39,7 @@ bool G1PeriodicGCTask::should_start_periodic_gc(G1CollectedHeap* g1h,
SuspendibleThreadSetJoiner sts;
// If we are currently in a concurrent mark we are going to uncommit memory soon.
if (g1h->concurrent_mark()->cm_thread()->in_progress()) {
if (g1h->concurrent_mark()->in_progress()) {
log_debug(gc, periodic)("Concurrent cycle in progress. Skipping.");
return false;
}


@ -739,7 +739,7 @@ double G1Policy::constant_other_time_ms(double pause_time_ms) const {
}
bool G1Policy::about_to_start_mixed_phase() const {
return _g1h->concurrent_mark()->cm_thread()->in_progress() || collector_state()->in_young_gc_before_mixed();
return _g1h->concurrent_mark()->in_progress() || collector_state()->in_young_gc_before_mixed();
}
bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_word_size) {
@ -1235,7 +1235,7 @@ bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause)
// We actually check whether we are marking here and not if we are in a
// reclamation phase. This means that we will schedule a concurrent mark
// even while we are still in the process of reclaiming memory.
bool during_cycle = _g1h->concurrent_mark()->cm_thread()->in_progress();
bool during_cycle = _g1h->concurrent_mark()->in_progress();
if (!during_cycle) {
log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). "
"GC cause: %s",


@ -29,12 +29,12 @@
G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint num_cache_entries) :
_target(target),
_cache(NEW_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, num_cache_entries, mtGC)),
_num_cache_entries(num_cache_entries),
_num_cache_entries_mask(_num_cache_entries - 1) {
guarantee(is_power_of_2(num_cache_entries),
"Number of cache entries must be power of two, but is %u", num_cache_entries);
_cache = NEW_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _num_cache_entries, mtGC);
}
G1RegionMarkStatsCache::~G1RegionMarkStatsCache() {


@ -85,7 +85,7 @@ void VM_G1TryInitiateConcMark::doit() {
GCCauseSetter x(g1h, _gc_cause);
_mark_in_progress = g1h->collector_state()->mark_in_progress();
_cycle_already_in_progress = g1h->concurrent_mark()->cm_thread()->in_progress();
_cycle_already_in_progress = g1h->concurrent_mark()->in_progress();
if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) {
// Failure to force the next GC pause to be a concurrent start indicates


@ -36,6 +36,7 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"


@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionManager.hpp"
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
#define VM_STRUCTS_G1GC(nonstatic_field, \
@ -39,9 +40,9 @@
\
nonstatic_field(G1HeapRegion, _type, G1HeapRegionType) \
nonstatic_field(G1HeapRegion, _bottom, HeapWord* const) \
nonstatic_field(G1HeapRegion, _top, HeapWord* volatile) \
nonstatic_field(G1HeapRegion, _top, Atomic<HeapWord*>) \
nonstatic_field(G1HeapRegion, _end, HeapWord* const) \
volatile_nonstatic_field(G1HeapRegion, _pinned_object_count, size_t) \
volatile_nonstatic_field(G1HeapRegion, _pinned_object_count, Atomic<size_t>)\
\
nonstatic_field(G1HeapRegionType, _tag, G1HeapRegionType::Tag volatile) \
\


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"
@ -489,7 +489,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
if (p != nullptr) {
HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
if (AtomicAccess::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
if (top_addr()->compare_set(cur_top, cur_chunk_top)) {
break;
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,6 @@
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
@ -123,7 +122,7 @@ void MutableSpace::initialize(MemRegion mr,
// makes the new space available for allocation by other threads. So this
// assignment must follow all other configuration and initialization that
// might be done for expansion.
AtomicAccess::release_store(end_addr(), mr.end());
_end.release_store(mr.end());
if (clear_space) {
clear(mangle_space);
@ -140,7 +139,7 @@ void MutableSpace::clear(bool mangle_space) {
#ifndef PRODUCT
void MutableSpace::mangle_unused_area() {
mangle_region(MemRegion(_top, _end));
mangle_region(MemRegion(top(), end()));
}
void MutableSpace::mangle_region(MemRegion mr) {
@ -155,14 +154,10 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
// If end is read first, other threads may advance end and top such that
// current top > old end and current top + size > current end. Then
// pointer_delta underflows, allowing installation of top > current end.
HeapWord* obj = AtomicAccess::load_acquire(top_addr());
HeapWord* obj = _top.load_acquire();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = AtomicAccess::cmpxchg(top_addr(), obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
if (result != obj) {
if (!_top.compare_set(obj, new_top)) {
continue; // another thread beat us to the allocation, try again
}
assert(is_object_aligned(obj) && is_object_aligned(new_top),
@ -177,7 +172,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
HeapWord* expected_top = obj + size;
return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top;
return _top.compare_set(expected_top, obj);
}
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
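// For illustration: the load_acquire of _top in cas_allocate() above pairs
// with the ordering comment: top must be read no later than end, otherwise a
// concurrent expansion can leave a fresh top next to a stale end and the
// unsigned pointer_delta underflows. A stand-alone rendering of the loop
// with std::atomic; the names are hypothetical.

#include <atomic>
#include <cstddef>
#include <cstdint>

using HeapWord = uintptr_t;

std::atomic<HeapWord*> g_top;
std::atomic<HeapWord*> g_end;

HeapWord* cas_allocate(size_t size) {
  while (true) {
    // Acquire-load top first; end only grows and top <= end always holds,
    // so the end read after it cannot be older than this top value.
    HeapWord* obj = g_top.load(std::memory_order_acquire);
    HeapWord* end = g_end.load(std::memory_order_relaxed);
    if (static_cast<size_t>(end - obj) < size) {
      return nullptr;  // not enough room
    }
    HeapWord* new_top = obj + size;
    if (g_top.compare_exchange_strong(obj, new_top)) {
      return obj;  // we installed the new top
    }
    // Lost the race; another thread advanced top, so simply retry.
  }
}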


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -53,8 +54,8 @@ class MutableSpace: public CHeapObj<mtGC> {
MemRegion _last_setup_region;
size_t _page_size;
HeapWord* _bottom;
HeapWord* volatile _top;
HeapWord* _end;
Atomic<HeapWord*> _top;
Atomic<HeapWord*> _end;
void numa_setup_pages(MemRegion mr, bool clear_space);
@ -64,21 +65,20 @@ class MutableSpace: public CHeapObj<mtGC> {
protected:
size_t page_size() const { return _page_size; }
Atomic<HeapWord*>* top_addr() { return &_top; }
public:
virtual ~MutableSpace() = default;
MutableSpace(size_t page_size);
// Accessors
HeapWord* bottom() const { return _bottom; }
HeapWord* top() const { return _top; }
HeapWord* end() const { return _end; }
HeapWord* top() const { return _top.load_relaxed(); }
HeapWord* end() const { return _end.load_relaxed(); }
void set_bottom(HeapWord* value) { _bottom = value; }
virtual void set_top(HeapWord* value) { _top = value; }
void set_end(HeapWord* value) { _end = value; }
HeapWord* volatile* top_addr() { return &_top; }
HeapWord** end_addr() { return &_end; }
virtual void set_top(HeapWord* value) { _top.store_relaxed(value); }
void set_end(HeapWord* value) { _end.store_relaxed(value); }
MemRegion region() const { return MemRegion(bottom(), end()); }
@ -110,7 +110,7 @@ public:
// Boolean queries.
bool is_empty() const { return used_in_words() == 0; }
bool not_empty() const { return used_in_words() > 0; }
bool contains(const void* p) const { return _bottom <= p && p < _end; }
bool contains(const void* p) const { return _bottom <= p && p < end(); }
// Size computations. Sizes are in bytes.
size_t used_in_bytes() const { return used_in_words() * HeapWordSize; }


@ -43,6 +43,7 @@
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/checkedCast.hpp"
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = nullptr;
@ -248,30 +249,19 @@ void PSPromotionManager::flush_labs() {
}
}
template <class T>
void PSPromotionManager::process_array_chunk_work(oop obj, int start, int end) {
assert(start <= end, "invariant");
T* const base = (T*)objArrayOop(obj)->base();
T* p = base + start;
T* const chunk_end = base + end;
while (p < chunk_end) {
claim_or_forward_depth(p);
++p;
}
void PSPromotionManager::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
PSPushContentsClosure pcc(this);
obj->oop_iterate_elements_range(&pcc,
checked_cast<int>(start),
checked_cast<int>(end));
}
void PSPromotionManager::process_array_chunk(PartialArrayState* state, bool stolen) {
// Access before release by claim().
oop new_obj = state->destination();
objArrayOop to_array = objArrayOop(state->destination());
PartialArraySplitter::Claim claim =
_partial_array_splitter.claim(state, &_claimed_stack_depth, stolen);
int start = checked_cast<int>(claim._start);
int end = checked_cast<int>(claim._end);
if (UseCompressedOops) {
process_array_chunk_work<narrowOop>(new_obj, start, end);
} else {
process_array_chunk_work<oop>(new_obj, start, end);
}
process_array_chunk(to_array, claim._start, claim._end);
}
void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) {
@ -284,12 +274,8 @@ void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) {
size_t initial_chunk_size =
// The source array is unused when processing states.
_partial_array_splitter.start(&_claimed_stack_depth, nullptr, to_array, array_length);
int end = checked_cast<int>(initial_chunk_size);
if (UseCompressedOops) {
process_array_chunk_work<narrowOop>(to_array, 0, end);
} else {
process_array_chunk_work<oop>(to_array, 0, end);
}
process_array_chunk(to_array, 0, initial_chunk_size);
}
oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {


@ -97,9 +97,8 @@ class PSPromotionManager {
inline static PSPromotionManager* manager_array(uint index);
template <class T> void process_array_chunk_work(oop obj,
int start, int end);
void process_array_chunk(PartialArrayState* state, bool stolen);
void process_array_chunk(objArrayOop obj, size_t start, size_t end);
void push_objArray(oop old_obj, oop new_obj);
inline void promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size,


@ -51,7 +51,7 @@ inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
}
template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
ALWAYSINLINE void PSPromotionManager::claim_or_forward_depth(T* p) {
assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
T heap_oop = RawAccess<>::oop_load(p);
if (PSScavenge::is_obj_in_young(heap_oop)) {
