Merge branch 'master' into JDK-8376269

Yasumasa Suenaga 2026-02-03 18:46:07 +09:00
commit 047e569351
736 changed files with 25572 additions and 18283 deletions

View File

@ -1,7 +1,7 @@
#!/bin/bash -f
#
# Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,13 @@
# questions.
#
# Script to update the Copyright YEAR range in Mercurial & Git sources.
# Script to update the Copyright YEAR range in Git sources.
# (Originally from xdono, Thanks!)
# To update Copyright years for changes in a specific branch,
# you use a command along these lines:
# $ git diff upstream/master...<branch-name> | lsdiff | cut -d '/' -f 2- | bash bin/update_copyright_year.sh -m -
#------------------------------------------------------------
copyright="Copyright"
copyright_symbol="(c)"
@ -47,7 +51,7 @@ rm -f -r ${tmp}
mkdir -p ${tmp}
total=0
usage="Usage: `basename "$0"` [-c company] [-y year] [-h|f]"
usage="Usage: `basename "$0"` [-c company] [-y year] [-m file] [-h|f]"
Help()
{
# Display Help
@ -65,15 +69,18 @@ Help()
echo "-b Specifies the base reference for change set lookup."
echo "-f Updates the copyright for all change sets in a given year,"
echo " as specified by -y. Overrides -b flag."
echo "-m Read the list of modified files from the given file,"
echo " use - to read from stdin"
echo "-h Print this help."
echo
}
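# Illustrative examples of the new -m flag (the file names are hypothetical):
#   update only the files listed in /tmp/modified-files.txt:
#     $ bash bin/update_copyright_year.sh -m /tmp/modified-files.txt
#   or feed the list through stdin, as in the header example above:
#     $ git diff --name-only upstream/master...HEAD | bash bin/update_copyright_year.sh -m -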
full_year=false
base_reference=master
modified_files_origin="";
# Process options
while getopts "b:c:fhy:" option; do
while getopts "b:c:fhm:y:" option; do
case $option in
b) # supplied base reference
base_reference=${OPTARG}
@ -91,6 +98,9 @@ while getopts "b:c:fhy:" option; do
y) # supplied company year
year=${OPTARG}
;;
m) # modified files will be read from the given origin
modified_files_origin="${OPTARG}"
;;
\?) # illegal option
echo "$usage"
exit 1
@ -110,18 +120,10 @@ git status &> /dev/null && git_found=true
if [ "$git_found" != "true" ]; then
echo "Error: Please execute script from within a JDK git repository."
exit 1
else
echo "Using Git version control system"
vcs_status=(git ls-files -m)
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
fi
echo "Using Git version control system"
# Return true if it makes sense to edit this file
saneFileToCheck()
{
@ -168,6 +170,25 @@ updateFile() # file
echo "${changed}"
}
# Update the copyright year on files listed on stdin
updateFiles() # stdin: list of files to update
{
count=0
fcount=0
while read i; do
fcount=`expr ${fcount} '+' 1`
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
}
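# updateFiles consumes one path per line on stdin, e.g. (illustrative):
#   git ls-files -m | updateFiles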
# Update the copyright year on all files changed by this changeset
updateChangesetFiles() # changeset
{
@ -178,18 +199,7 @@ updateChangesetFiles() # changeset
| ${awk} -F' ' '{for(i=1;i<=NF;i++)print $i}' \
> ${files}
if [ -f "${files}" -a -s "${files}" ] ; then
fcount=`cat ${files}| wc -l`
for i in `cat ${files}` ; do
if [ `updateFile "${i}"` = "true" ] ; then
count=`expr ${count} '+' 1`
fi
done
if [ ${count} -gt 0 ] ; then
printf " UPDATED year on %d of %d files.\n" ${count} ${fcount}
total=`expr ${total} '+' ${count}`
else
printf " None of the %d files were changed.\n" ${fcount}
fi
cat ${files} | updateFiles
else
printf " ERROR: No files changed in the changeset? Must be a mistake.\n"
set -x
@ -204,67 +214,80 @@ updateChangesetFiles() # changeset
}
# Check if repository is clean
vcs_status=(git ls-files -m)
previous=`"${vcs_status[@]}"|wc -l`
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contains previously edited working set files."
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
# Get all changesets this year
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
if [ "x$modified_files_origin" != "x" ]; then
cat $modified_files_origin | updateFiles
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
# Get all changesets this year
if [ "$full_year" = "true" ]; then
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
else
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
fi
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
all_changesets=${tmp}/all_changesets
rm -f ${all_changesets}
"${vcs_list_changesets[@]}" > ${all_changesets}
# Check changeset to see if it is Copyright only changes, filter changesets
if [ -s ${all_changesets} ] ; then
echo "Changesets made in ${year}: `cat ${all_changesets} | wc -l`"
index=0
cat ${all_changesets} | while read changeset ; do
index=`expr ${index} '+' 1`
desc=${tmp}/desc.${changeset}
rm -f ${desc}
echo "------------------------------------------------"
"${vcs_changeset_message[@]}" "${changeset}" > ${desc}
printf "%d: %s\n%s\n" ${index} "${changeset}" "`cat ${desc}|head -1`"
if [ "${year}" = "2010" ] ; then
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F rebrand > /dev/null ; then
printf " EXCLUDED rebrand changeset.\n"
elif cat ${desc} | grep -i -F copyright > /dev/null ; then
printf " EXCLUDED copyright changeset.\n"
else
updateChangesetFiles ${changeset}
fi
else
if cat ${desc} | grep -i -F "Added tag" > /dev/null ; then
printf " EXCLUDED tag changeset.\n"
elif cat ${desc} | grep -i -F "copyright year" > /dev/null ; then
printf " EXCLUDED copyright year changeset.\n"
else
updateChangesetFiles ${changeset}
fi
fi
rm -f ${desc}
done
fi
if [ ${total} -gt 0 ] ; then
echo "---------------------------------------------"
echo "Updated the copyright year on a total of ${total} files."
if [ ${previous} -eq 0 ] ; then
echo "This count should match the count of modified files in the repository: ${vcs_status[*]}"
else
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
else
echo "---------------------------------------------"
echo "No files were changed"
if [ ${previous} -ne 0 ] ; then
echo "WARNING: This repository contained previously edited working set files."
fi
echo " ${vcs_status[*]} | wc -l = `"${vcs_status[@]}" | wc -l`"
fi
fi
# Cleanup

View File

@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
### SCTP Tests
The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.
For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:
```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```
For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:
```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```
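To double-check that the JDK itself can use SCTP (rather than only that the
library is installed), a small probe along these lines can help. This is a
sketch (the class name is illustrative): it relies on the JDK-specific
`com.sun.nio.sctp` API, whose `open()` throws an
`UnsupportedOperationException` when SCTP support is unavailable.

```
import com.sun.nio.sctp.SctpChannel;

public class SctpProbe {
    public static void main(String[] args) throws Exception {
        try {
            // Opening a channel fails fast when the kernel module or
            // runtime library is missing.
            SctpChannel.open().close();
            System.out.println("SCTP is available");
        } catch (UnsupportedOperationException e) {
            System.out.println("SCTP is NOT available: " + e);
        }
    }
}
```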
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -972,6 +972,10 @@ define SetupRunJtregTestBody
JTREG_AUTO_PROBLEM_LISTS += ProblemList-enable-preview.txt
endif
ifneq ($$(findstring -XX:+UseCompactObjectHeaders, $$(JTREG_ALL_OPTIONS)), )
JTREG_AUTO_PROBLEM_LISTS += ProblemList-coh.txt
endif
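# For example (illustrative invocation), a run such as
#   make test TEST=tier1 JTREG="VM_OPTIONS=-XX:+UseCompactObjectHeaders"
# picks up ProblemList-coh.txt automatically through the check above.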
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
# Accept both absolute paths as well as relative to the current test root.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
* <pre>
* &commat;jls section-number description
* &commat;jls chapter.section description
* &commat;jls preview-feature-chapter.section description
* </pre>
*
* For example:
*
* <pre>
* &commat;jls 3.4 Line Terminators
* &commat;jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
* </pre>
*
* will produce the following HTML, depending on the file containing
@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* <pre>{@code
* <dt>See <i>Java Language Specification</i>:
* <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
* <dd><a href="../../specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1">
* 5.7.1 Exact Testing Conversions</a><sup class="preview-mark">
* <a href="../../specs/jls/jls-1.html#jls-1.5.1">PREVIEW</a></sup>
* }</pre>
*
* Copies of JLS and JVMS are expected to have been placed in the {@code specs}
* folder. These documents are not included in open-source repositories.
* In inline tags (note that you need to add the JLS/JVMS prefix manually):
* <pre>
* JLS {&commat;jls 3.4}
* </pre>
*
* produces (note the section sign and no trailing dot):
* <pre>
* JLS <a href="../../specs/jls/jls-3.html#jls-3.4">§3.4</a>
* </pre>
*
* Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
* been placed in the {@code specs} folder. These documents are not included
* in open-source repositories.
*/
public class JSpec implements Taglet {
@ -87,9 +103,9 @@ public class JSpec implements Taglet {
}
}
private String tagName;
private String specTitle;
private String idPrefix;
private final String tagName;
private final String specTitle;
private final String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@ -98,7 +114,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
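// For example, "primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions"
// resolves to preview = "primitive-types-in-patterns-instanceof-switch-",
// chapter = "5", section = ".7.1".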
/**
* Returns the set of locations in which the tag may be used.
@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
// preview-feature-4.6 is preview-feature-, 4, .6
String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section);
String url = preview == null ?
String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section) :
String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, chapter, section, preview);
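// e.g. idPrefix "jls" with preview "primitive-types-in-patterns-instanceof-switch-",
// chapter "5", section ".7.1" yields
// <root>/specs/primitive-types-in-patterns-instanceof-switch-jls.html#jls-5.7.1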
var literal = expand(contents).trim();
var prefix = (preview == null ? "" : preview) + chapter + section;
if (literal.startsWith(prefix)) {
var hasFullTitle = literal.length() > prefix.length();
if (hasFullTitle) {
// Drop the preview identifier
literal = chapter + section + literal.substring(prefix.length());
} else {
// No section sign if the tag refers to a chapter, like {@jvms 4}
String sectionSign = section.isEmpty() ? "" : "§";
// Change whole text to "§chapter.x" in inline tags.
literal = sectionSign + chapter + section;
}
}
sb.append("<a href=\"")
.append(url)
.append("\">")
.append(expand(contents))
.append(literal)
.append("</a>");
if (preview != null) {
// Add PREVIEW superscript that links to JLS/JVMS 1.5.1
// "Restrictions on the Use of Preview Features"
// Similar to how APIs link to the Preview info box warning
var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
rootParent, idPrefix, "1", ".5.1");
sb.append("<sup class=\"preview-mark\"><a href=\"")
.append(sectionLink)
.append("\">PREVIEW</a></sup>");
}
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("<br>");
}

View File

@ -1,264 +0,0 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This source code is provided to illustrate the usage of a given feature
* or technique and has been deliberately simplified. Additional steps
* required for a production-quality application, such as security checks,
* input validation and proper error handling, might not be present in
* this sample code.
*/
import java.util.EventObject;
import java.util.List;
import javax.swing.JTable;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableCellEditor;
import javax.swing.table.TableCellRenderer;
import javax.swing.table.TableColumn;
/**
* The OldJTable is an unsupported class containing some methods that were
* deleted from the JTable between releases 0.6 and 0.7
*/
@SuppressWarnings("serial")
public class OldJTable extends JTable
{
/*
* A new convenience method returning the index of the column in the
* co-ordinate space of the view.
*/
public int getColumnIndex(Object identifier) {
return getColumnModel().getColumnIndex(identifier);
}
//
// Methods deleted from the JTable because they only work with the
// DefaultTableModel.
//
public TableColumn addColumn(Object columnIdentifier, int width) {
return addColumn(columnIdentifier, width, null, null, null);
}
public TableColumn addColumn(Object columnIdentifier, List<?> columnData) {
return addColumn(columnIdentifier, -1, null, null, columnData);
}
// Override the new JTable implementation - it will not add a column to the
// DefaultTableModel.
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
return addColumn(columnIdentifier, width, renderer, editor, null);
}
public TableColumn addColumn(Object columnIdentifier, int width,
TableCellRenderer renderer,
TableCellEditor editor, List<?> columnData) {
checkDefaultTableModel();
// Set up the model side first
DefaultTableModel m = (DefaultTableModel)getModel();
m.addColumn(columnIdentifier, columnData.toArray());
// The column will have been added to the end, so the index of the
// column in the model is the last element.
TableColumn newColumn = new TableColumn(
m.getColumnCount()-1, width, renderer, editor);
super.addColumn(newColumn);
return newColumn;
}
// Not possible to make this work the same way ... change it so that
// it does not delete columns from the model.
public void removeColumn(Object columnIdentifier) {
super.removeColumn(getColumn(columnIdentifier));
}
public void addRow(Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData);
}
public void addRow(List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).addRow(rowData.toArray());
}
public void removeRow(int rowIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).removeRow(rowIndex);
}
public void moveRow(int startIndex, int endIndex, int toIndex) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
}
public void insertRow(int rowIndex, Object[] rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
}
public void insertRow(int rowIndex, List<?> rowData) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
}
public void setNumRows(int newSize) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setNumRows(newSize);
}
public void setDataVector(Object[][] newData, List<?> columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(
newData, columnIds.toArray());
}
public void setDataVector(Object[][] newData, Object[] columnIds) {
checkDefaultTableModel();
((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
}
protected void checkDefaultTableModel() {
if(!(dataModel instanceof DefaultTableModel))
throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
}
//
// Methods removed from JTable in the move from identifiers to ints.
//
public Object getValueAt(Object columnIdentifier, int rowIndex) {
return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
}
public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
}
public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
}
public boolean editColumnRow(Object identifier, int row) {
return super.editCellAt(row, getColumnIndex(identifier));
}
public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
moveColumn(getColumnIndex(columnIdentifier),
getColumnIndex(targetColumnIdentifier));
}
public boolean isColumnSelected(Object identifier) {
return isColumnSelected(getColumnIndex(identifier));
}
public TableColumn addColumn(int modelColumn, int width) {
return addColumn(modelColumn, width, null, null);
}
public TableColumn addColumn(int modelColumn) {
return addColumn(modelColumn, 75, null, null);
}
/**
* Creates a new column with <I>modelColumn</I>, <I>width</I>,
* <I>renderer</I>, and <I>editor</I> and adds it to the end of
* the JTable's array of columns. This method also retrieves the
* name of the column using the model's <I>getColumnName(modelColumn)</I>
* method, and sets both the header value and the identifier
* for this TableColumn accordingly.
* <p>
* The <I>modelColumn</I> is the index of the column in the model which
* will supply the data for this column in the table. This, like the
* <I>columnIdentifier</I> in previous releases, does not change as the
* columns are moved in the view.
* <p>
* For the rest of the JTable API, and all of its associated classes,
* columns are referred to in the co-ordinate system of the view, the
* index of the column in the model is kept inside the TableColumn
* and is used only to retrieve the information from the appropriate
* column in the model.
* <p>
*
* @param modelColumn The index of the column in the model
* @param width The new column's width. Or -1 to use
* the default width
* @param renderer The renderer used with the new column.
* Or null to use the default renderer.
* @param editor The editor used with the new column.
* Or null to use the default editor.
*/
public TableColumn addColumn(int modelColumn, int width,
TableCellRenderer renderer,
TableCellEditor editor) {
TableColumn newColumn = new TableColumn(
modelColumn, width, renderer, editor);
addColumn(newColumn);
return newColumn;
}
//
// Methods that had their arguments switched.
//
// These won't work with the new table package.
/*
public Object getValueAt(int columnIndex, int rowIndex) {
return super.getValueAt(rowIndex, columnIndex);
}
public boolean isCellEditable(int columnIndex, int rowIndex) {
return super.isCellEditable(rowIndex, columnIndex);
}
public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
super.setValueAt(aValue, rowIndex, columnIndex);
}
*/
public boolean editColumnRow(int columnIndex, int rowIndex) {
return super.editCellAt(rowIndex, columnIndex);
}
public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
return super.editCellAt(rowIndex, columnIndex, e);
}
} // End Of Class OldJTable

View File

@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
// Convert BootTest condition to Assembler condition.
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;

View File

@ -1218,43 +1218,11 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv) {
// Given a profile data offset, generate an Address which points to
// the corresponding slot in mdo->data().
// Clobbers rscratch2.
auto slot_at = [=](ByteSize offset) -> Address {
return __ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, offset),
LogBytesPerWord);
};
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
__ cmp(recv, rscratch1);
__ br(Assembler::NE, next_test);
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
DataLayout::counter_increment);
__ b(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
__ ldr(rscratch1, recv_addr);
__ cbnz(rscratch1, next_test);
__ str(recv, recv_addr);
__ mov(rscratch1, DataLayout::counter_increment);
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
__ b(*update_done);
__ bind(next_test);
}
int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
__ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@ -1316,14 +1284,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(obj, *obj_is_null);
}
@ -1430,13 +1393,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
type_profile_helper(mdo, md, data, recv, &update_done);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(value, done);
}
@ -2540,13 +2499,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
// dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@ -2554,36 +2509,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == nullptr) {
__ mov_metadata(rscratch1, known_klass->constant_encoding());
Address recv_addr =
__ form_address(rscratch2, mdo,
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
LogBytesPerWord);
__ str(rscratch1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
// Receiver type is not found in profile data.
// Fall back to runtime helper to handle the rest at runtime.
__ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);

View File

@ -50,9 +50,8 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
void type_profile_helper(Register mdo, ciMethodData *md,
ciProfileData *data, Register recv);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);

View File

@ -85,26 +85,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -358,20 +348,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@ -394,13 +384,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@ -409,20 +399,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
rscratch1 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}
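// For reference, the resulting reference-store sequence is, in pseudocode
// (a sketch; need_satb_barrier/need_card_barrier are the runtime filters
// used above, satb_enqueue stands in for the SATB queue machinery):
//
//   if (satb)                  pre_val = *addr; satb_enqueue(pre_val);
//   *addr = val;                                 // the store itself
//   if (card && val != null)   card_table[addr >> card_shift] = dirty;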

View File

@ -40,23 +40,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -240,15 +240,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
// r2, r5
// r2
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
profile_typecheck(r2, Rsub_klass); // blows r2
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@ -991,7 +990,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1009,7 +1007,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@ -1018,131 +1016,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2, int start_row,
Label& done) {
if (TypeProfileWidth == 0) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} else {
record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
// Test this row for both the item and for null.
// Take any of three different outcomes:
// 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
for (int row = start_row; row <= last_row; row++) {
Label next_test;
bool test_for_null_also = (row == start_row);
// See if the item is item[n].
int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test);
// (Reg2 now contains the item from the CallData.)
// The item is item[n]. Increment count[n].
int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
b(done);
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on item[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
cbz(reg2, found_null);
// Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(done);
bind(found_null);
break;
}
// Since null is rare, make it be the branch-taken case.
cbz(reg2, found_null);
// Put all the "Case 3" tests here.
record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn);
// Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot.
bind(found_null);
}
}
// In the fall-through case, we found no matching item, but we
// observed the item[start_row] is null.
// Fill in the item field and increment the count.
int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(item_count_offset_fn(start_row));
mov(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) {
b(done);
}
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != nullptr) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != nullptr) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp, Register reg2) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
bind (done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@ -1200,7 +1073,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@ -1213,7 +1086,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);

View File

@ -273,15 +273,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done);
void record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@ -295,11 +286,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,

View File

@ -2118,6 +2118,161 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
// Handle the receiver type profile update given the "recv" klass.
//
// Normally updates the ReceiverTypeData (RTD) that starts at "mdp" + "mdp_offset".
// If there are no matching or claimable receiver entries in the RTD, updates
// the polymorphic counter.
//
// This code is expected to run in either the interpreter or JIT-ed code, without
// extra synchronization. For safety, receiver cells are claimed atomically, which
// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
// counter updates are not atomic.
//
void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
assert_different_registers(recv, mdp, rscratch1, rscratch2);
int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
int poly_count_offset = in_bytes(CounterData::count_offset());
int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
// Adjust for MDP offsets.
base_receiver_offset += mdp_offset;
end_receiver_offset += mdp_offset;
poly_count_offset += mdp_offset;
#ifdef ASSERT
// We are about to walk the MDO slots without asking for offsets.
// Check that our math hits all the right spots.
for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
int offset = base_receiver_offset + receiver_step*c;
int count_offset = offset + receiver_to_count_step;
assert(offset == real_recv_offset, "receiver slot math");
assert(count_offset == real_count_offset, "receiver count math");
}
int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
assert(poly_count_offset == real_poly_count_offset, "poly counter math");
#endif
// Corner case: no profile table. Increment poly counter and exit.
if (ReceiverTypeData::row_limit() == 0) {
increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
return;
}
Register offset = rscratch2;
Label L_loop_search_receiver, L_loop_search_empty;
Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
// The code here recognizes three major cases:
// A. Fastest: receiver found in the table
// B. Fast: no receiver in the table, and the table is full
// C. Slow: no receiver in the table, free slots in the table
//
// Case A performance matters most, as well-behaved code ends up there, especially
// with larger TypeProfileWidth. Case B performance is important as well: this is
// where the bulk of the code lands for ordinarily megamorphic call sites. Case C
// performance is not essential; its job is to deal with installation races, so we
// optimize for code density instead. Case C must make sure that receiver rows are
// claimed only once. This guarantees we never overwrite a row for another receiver
// and never duplicate receivers in the list, keeping the profile type-accurate.
//
// It is very tempting to handle these cases in a single loop, and claim the first slot
// without checking the rest of the table. But, profiling code should tolerate free slots
// in the table, as class unloading can clear them. After such cleanup, the receiver
// we need might be _after_ the free slot. Therefore, we let at least one full scan
// complete before trying to install new slots. Splitting the code into several tight
// loops also helpfully optimizes for cases A and B.
//
// This code is effectively:
//
// restart:
// // Fastest: receiver is already installed
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == recv) goto found_recv(i);
// }
//
// // Fast: no receiver, but profile is full
// for (i = 0; i < receiver_count(); i++) {
// if (receiver(i) == null) goto found_null(i);
// }
// goto polymorphic
//
// // Slow: try to install receiver
// found_null(i):
// CAS(&receiver(i), null, recv);
// goto restart
//
// polymorphic:
// count++;
// return
//
// found_recv(i):
// *receiver_count(i)++
//
bind(L_restart);
// Fastest: receiver is already installed
mov(offset, base_receiver_offset);
bind(L_loop_search_receiver);
ldr(rscratch1, Address(mdp, offset));
cmp(rscratch1, recv);
br(Assembler::EQ, L_found_recv);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_receiver);
// Fast: no receiver, but profile is full
mov(offset, base_receiver_offset);
bind(L_loop_search_empty);
ldr(rscratch1, Address(mdp, offset));
cbz(rscratch1, L_found_empty);
add(offset, offset, receiver_step);
sub(rscratch1, offset, end_receiver_offset);
cbnz(rscratch1, L_loop_search_empty);
b(L_polymorphic);
// Slow: try to install receiver
bind(L_found_empty);
// Atomically swing receiver slot: null -> recv.
//
// The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
// is used to hold the destination address. This is safe because the
// offset is no longer needed after the address is computed.
lea(rscratch2, Address(mdp, offset));
cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
/*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
// CAS success means the slot now has the receiver we want. CAS failure means
// something had claimed the slot concurrently: it can be the same receiver we want,
// or something else. Since this is a slow path, we can optimize for code density,
// and just restart the search from the beginning.
b(L_restart);
// Counter updates:
// Increment polymorphic counter instead of receiver slot.
bind(L_polymorphic);
mov(offset, poly_count_offset);
b(L_count_update);
// Found a receiver, convert its slot offset to corresponding count offset.
bind(L_found_recv);
add(offset, offset, receiver_to_count_step);
bind(L_count_update);
increment(Address(mdp, offset), DataLayout::counter_increment);
}
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
@ -5606,12 +5761,11 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Strictly speaking the byte_map_base isn't an address at all, and it might
// Strictly speaking the card table base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
mov(reg, (uint64_t)byte_map_base);
mov(reg, (uint64_t)ctbs->card_table_base_const());
}
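// Sketch of how the materialised constant is consumed by card marking
// (assuming the usual byte-per-card table):
//   card_addr = card_table_base + (obj >> CardTable::card_shift());
//   *card_addr = dirty;
// Only the sum has to land inside the table, which is why the "base" by
// itself need not be a valid address.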
void MacroAssembler::build_frame(int framesize) {

View File

@ -1122,6 +1122,8 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {

View File

@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
// (parsedLength or (parsedLength - 48) must be divisible by 64.)
// we assume that parsed and condensed are allocated such that for
// n = (parsedLength + 63) / 64
// n blocks of 96 bytes of input can be processed, i.e.
// index + n * 96 <= condensed.length and
// n * 64 <= parsed.length
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
// parsed (short[112 or 256]) = c_rarg2
// parsedLength (112 or 256) = c_rarg3
// parsed (short[]) = c_rarg2
// parsedLength = c_rarg3
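// Worked example (using the 112-short case from the old comment above):
// parsedLength == 112 gives n == (112 + 63) / 64 == 2, so the stub reads
// index + 2 * 96 == index + 192 bytes of condensed and writes
// 2 * 64 == 128 shorts, i.e. parsed.length must be at least 128 even
// though parsedLength is only 112.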
address generate_kyber12To16() {
Label L_F00, L_loop, L_end;
Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
__ cmp(parsedLength, (u1)64);
__ br(Assembler::GE, L_loop);
__ cbz(parsedLength, L_end);
// if anything is left it should be a final 72 bytes of input
// i.e. a final 48 12-bit values. so we handle this by loading
// 48 bytes into all 16B lanes of front(vin) and only 24
// bytes into the lower 8B lane of back(vin)
vs_ld3_post(vs_front(vin), __ T16B, condensed);
vs_ld3(vs_back(vin), __ T8B, condensed);
// Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
// n.b. target elements 2 and 3 of va duplicate elements 4 and
// 5 and target element 2 of vb duplicates element 4.
__ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
__ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
__ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
__ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
__ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
// This time expand just the lower 8 lanes
__ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
__ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
__ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
// shift lo byte of copy 1 of the middle stripe into the high byte
__ shl(va[2], __ T8H, va[2], 8);
__ shl(va[3], __ T8H, va[3], 8);
__ shl(vb[2], __ T8H, vb[2], 8);
// expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
// vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
// int are in bit positions [4..11].
__ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
__ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
__ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
// mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
// shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
// copy2
__ andr(va[2], __ T16B, va[2], v31);
__ andr(va[3], __ T16B, va[3], v31);
__ ushr(va[4], __ T8H, va[4], 4);
__ ushr(va[5], __ T8H, va[5], 4);
__ andr(vb[2], __ T16B, vb[2], v31);
__ ushr(vb[4], __ T8H, vb[4], 4);
// sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
// hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
// n.b. ordering ensures: i) inputs are consumed before they are
// overwritten ii) order of 16-bit results across successive
// pairs of vectors in va and then lower half of vb reflects order
// of corresponding 12-bit inputs
__ addv(va[0], __ T8H, va[0], va[2]);
__ addv(va[2], __ T8H, va[1], va[3]);
__ addv(va[1], __ T8H, va[4], va[6]);
__ addv(va[3], __ T8H, va[5], va[7]);
__ addv(vb[0], __ T8H, vb[0], vb[2]);
__ addv(vb[1], __ T8H, vb[4], vb[6]);
// store 48 results interleaved as shorts
vs_st2_post(vs_front(va), __ T8H, parsed);
vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
__ BIND(L_end);
__ cmp(parsedLength, (u1)0);
__ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0

View File

@ -3370,7 +3370,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
__ profile_virtual_call(r0, rlocals, r3);
__ profile_virtual_call(r0, rlocals);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@ -3500,7 +3500,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
__ profile_virtual_call(r3, r13, r19);
__ profile_virtual_call(r3, r13);
// Get declaring interface class from method, and itable index

View File

@ -67,9 +67,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_cardtable_loop, L_done;
@ -83,7 +81,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base());
__ mov_address(tmp, (address)ctbs->card_table_base_const());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@ -122,8 +120,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Load card table base address.
@ -140,7 +137,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
__ mov_address(card_table_base, (address)ct->byte_map_base());
__ mov_address(card_table_base, (address)ctbs->card_table_base_const());
}
// The 2nd part of the store check.
@ -170,8 +167,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
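All of these call sites reduce to the usual card-marking idiom; a minimal scalar sketch, assuming card_table_base_const() returns the same address-biased base that ct->byte_map_base() used to:

#include <cstdint>

// Dirty the card covering store_addr: shift down to a card index, then write
// the dirty value (0 in HotSpot's CardTable) into the biased base table.
static inline void dirty_card(volatile uint8_t* card_table_base,
                              uintptr_t store_addr, unsigned card_shift) {
  card_table_base[store_addr >> card_shift] = 0;
}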

View File

@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@ -2424,6 +2427,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);

View File

@ -923,6 +923,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }

View File

@ -664,3 +664,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
// Works for single- and double-precision floats.
// dst = (op1 cmp(cc) op2) ? src1 : src2;
// Unordered semantics are the same as for CmpF3Node/CmpD3Node, which implement the fcmpl/dcmpl bytecodes.
// Comparing unordered values has the same result as when op1 is less than op2.
// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
// See operand cmpOp() for details.
bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
auto cmp = (Assembler::Condition)(cc & 3);
switch(cmp) {
case Assembler::Condition::equal:
// Use false_result if "unordered".
xscmpeqdp(tmp, op1, op2);
break;
case Assembler::Condition::greater:
// Use false_result if "unordered".
xscmpgtdp(tmp, op1, op2);
break;
case Assembler::Condition::less:
// Use true_result if "unordered".
xscmpgedp(tmp, op1, op2);
invert_cond = !invert_cond;
break;
default:
assert(false, "unsupported compare condition: %d", cc);
ShouldNotReachHere();
}
VectorSRegister true_result = invert_cond ? src2 : src1;
VectorSRegister false_result = invert_cond ? src1 : src2;
xxsel(dst, false_result, true_result, tmp);
}
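A scalar model of the selection above, shown for the less-than path where the inverted xscmpgedp compare makes NaN inputs fall through to src1 (a sketch for the double case only):

// dst = (op1 < op2) ? src1 : src2, with unordered treated as 'less' (fcmpl).
static double cmov_lt_model(double op1, double op2, double src1, double src2) {
  bool ge = (op1 >= op2);   // like xscmpgedp: false when either input is NaN
  return ge ? src2 : src1;  // inverted select, so NaN picks src1 as documented
}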

View File

@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP

View File

@ -103,8 +103,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@ -117,7 +116,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
__ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
__ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@ -140,8 +139,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,

View File

@ -50,14 +50,14 @@
#define __ masm->
void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
__ block_comment("satb_write_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_write_barrier (shenandoahgc)");
__ block_comment("satb_barrier (shenandoahgc) {");
satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} satb_barrier (shenandoahgc)");
}
}
@ -198,11 +198,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@ -574,13 +575,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@ -603,21 +604,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
if (is_reference_type(type)) {
if (ShenandoahSATBBarrier) {
satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
return;
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
// No need for post barrier if storing null
if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
store_check(masm, base, ind_or_offs, tmp1);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, base, ind_or_offs, tmp1);
}
}
@ -771,9 +784,6 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;

View File

@ -45,15 +45,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
Register pre_val,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);
void store_check(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void card_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@ -85,10 +85,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
void satb_write_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void satb_barrier(MacroAssembler* masm,
Register base, RegisterOrConstant ind_or_offs,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,

View File

@ -64,12 +64,10 @@
return true;
}
// Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
// Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
// fsel doesn't accept a condition register as input, so this would be slightly different.
static int float_cmove_cost() { return ConditionalMoveLimit; }
// Suppress CMOVF on Power8 and older because there are no fast nodes for it.
static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched

View File

@ -3024,7 +3024,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
// use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@ -3099,7 +3098,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
// use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@ -6618,7 +6616,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@ -7293,7 +7290,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7313,7 +7309,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -7326,6 +7321,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
predicate(PowerArchitecturePPC64 >= 9);
effect(TEMP tmp);
ins_cost(2*DEFAULT_COST);
format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
size(8);
ins_encode %{
__ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
$op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
$true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
$tmp$$FloatRegister->to_vsr());
%}
ins_pipe(pipe_class_default);
%}
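Each of the four new instructs expands to the same two-instruction sequence, which is why they all declare size(8); an illustrative expansion for the '>' case, following C2_MacroAssembler::cmovF above:

// xscmpgtdp tmp, op1, op2                       ; all-ones where op1 > op2, false on NaN
// xxsel     dst, false_result, true_result, tmp ; lane-select on the mask
// Two 4-byte instructions, hence size(8).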
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@ -8492,7 +8551,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -8551,7 +8609,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@ -10262,7 +10319,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10276,7 +10332,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10439,7 +10494,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10453,7 +10507,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -11080,7 +11133,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;

View File

@ -88,26 +88,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call) {
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -376,21 +366,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
xthread /* thread */,
tmp1 /* tmp1 */,
tmp2 /* tmp2 */,
true /* tosca_live */,
true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@ -413,13 +403,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
if (!on_oop) {
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@ -428,20 +418,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
shenandoah_write_barrier_pre(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp3 /* obj */,
tmp2 /* pre_val */,
xthread /* thread */,
tmp1 /* tmp */,
t0 /* tmp2 */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
bool in_heap = (decorators & IN_HEAP) != 0;
bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
if (needs_post_barrier) {
store_check(masm, tmp3);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp3);
}
}

View File

@ -41,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register thread,
Register tmp1,
Register tmp2,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);

View File

@ -5110,9 +5110,8 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
mv(reg, (uint64_t)byte_map_base);
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
mv(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {

View File

@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
__ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
__ ld(x12, at_tos_p1()); // index
__ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
__ ld(x10, iaddress(x12, x10, _masm));
__ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
__ ld(x10, iaddress(x12, t0, _masm));
__ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
__ sd(x10, iaddress(x12, t0, _masm));
__ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {
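The ld/sd-to-lw/sw switch matches Java int semantics for locals; a scalar model of iinc under that reading (lw sign-extends the 32-bit slot, addw wraps to 32 bits, sw stores the low 32 bits back):

#include <cstdint>

static int32_t iinc_model(int32_t local, int8_t constant) {
  // 32-bit wrapping add, as addw does; the result is what sw writes back.
  return (int32_t)((uint32_t)local + (uint32_t)(int32_t)constant);
}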

View File

@ -83,8 +83,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@ -105,7 +104,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
__ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@ -179,13 +178,12 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop address in register store_addr. The content of
// register store_addr is destroyed afterwards.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
__ load_absolute_address(tmp, (address)ct->byte_map_base());
__ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}

View File

@ -95,11 +95,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BarrierSet *bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
intptr_t disp = (intptr_t) ct->byte_map_base();
SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_loop, L_done;
const Register end = count;
@ -115,7 +111,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, disp);
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@ -128,10 +124,7 @@ __ BIND(L_done);
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
__ shrptr(obj, CardTable::card_shift());
@ -142,7 +135,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {

View File

@ -174,24 +174,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
assert(ShenandoahSATBBarrier, "Should be checked by caller");
if (ShenandoahSATBBarrier) {
satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
}
}
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@ -533,18 +523,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
satb_barrier(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@ -575,41 +565,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_oop = is_reference_type(type);
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;
// 1: non-reference types require no barriers
if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
// flatten object address if needed
// We do this regardless of precise card marking because we need the registers
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
__ lea(tmp1, dst);
}
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (needs_pre_barrier) {
shenandoah_write_barrier_pre(masm /*masm*/,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
}
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
if (val != noreg) {
if (ShenandoahCardBarrier) {
store_check(masm, tmp1);
}
// Flatten object address right away for simplicity: likely needed by barriers
assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
if (dst.index() == noreg && dst.disp() == 0) {
if (dst.base() != tmp1) {
__ movptr(tmp1, dst.base());
}
} else {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
__ lea(tmp1, dst);
}
bool storing_non_null = (val != noreg);
// 2: pre-barrier: SATB needs the previous value
if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
satb_barrier(masm,
tmp1 /* obj */,
tmp2 /* pre_val */,
tmp3 /* tmp */,
storing_non_null /* tosca_live */,
false /* expand_call */);
}
// Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
// 3: post-barrier: card barrier needs store address
if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
card_barrier(masm, tmp1);
}
}

View File

@ -41,21 +41,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void satb_barrier(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void shenandoah_write_barrier_pre(MacroAssembler* masm,
Register obj,
Register pre_val,
Register tmp,
bool tosca_live,
bool expand_call);
void store_check(MacroAssembler* masm, Register obj);
void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
//
// Query CPUID 0xD.19 for APX XSAVE offset
// Extended State Enumeration Sub-leaf 19 (APX)
// EAX = size of APX state (should be 128)
// EBX = offset in standard XSAVE format
//
__ movl(rax, 0xD);
__ movl(rcx, 19);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
__ movl(Address(rsi, 0), rax);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
__ movl(Address(rsi, 0), rbx);
UseAPX = save_apx;
__ bind(vector_save_restore);
//
@ -921,8 +935,9 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
(_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
_model == 0xCC || _model == 0xDD)) {
(supports_hybrid() ||
_model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
_model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}
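A user-land sketch of the same CPUID query, using the GCC/Clang <cpuid.h> builtin instead of the stub's raw cpuid (illustrative only, not part of the stub):

#include <cpuid.h>
#include <cstdint>

// Leaf 0xD, subleaf 19: APX extended-state size (EAX) and XSAVE offset (EBX).
static bool query_apx_xstate(uint32_t* size, uint32_t* offset) {
  uint32_t eax, ebx, ecx, edx;
  if (!__get_cpuid_count(0xD, 19, &eax, &ebx, &ecx, &edx)) return false;
  *size = eax;    // expected to be 128 bytes (r16-r31)
  *offset = ebx;  // offset within the standard-format XSAVE area
  return eax != 0;
}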

View File

@ -676,6 +676,10 @@ protected:
// Space to save APX registers after signal handling
jlong apx_save[2]; // Save r16 and r31
// cpuid function 0xD, subleaf 19 (APX extended state)
uint32_t apx_xstate_size; // EAX: size of APX state (128)
uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
VM_Features feature_flags() const;
// Asserts
@ -739,6 +743,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check the ymm registers after signal handling
static int ymm_test_value() { return 0xCAFEBABE; }

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}
inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP

View File

@ -43,7 +43,8 @@ void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
_card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
_card_table_base = (address)ctbs->card_table_base_const();
} else {
_card_table_base = nullptr;
}

View File

@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "runtime/vm_version.hpp"
// put OS-includes here
# include <sys/types.h>
@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
#define XSAVE_HDR_OFFSET 512
#define XFEATURE_APX (1ULL << 19)
// XSAVE header structure
// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
// Also: Linux kernel arch/x86/include/asm/fpu/types.h
struct xstate_header {
uint64_t xfeatures;
uint64_t xcomp_bv;
uint64_t reserved[6];
};
// APX extended state - R16-R31 (16 x 64-bit registers)
// See: Intel APX Architecture Specification
struct apx_state {
uint64_t regs[16]; // r16-r31
};
static apx_state* get_apx_state(const ucontext_t* uc) {
uint32_t offset = VM_Version::apx_xstate_offset();
if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
return nullptr;
}
char* xsave = (char*)uc->uc_mcontext.fpregs;
xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
// Check if APX state is present in this context
if (!(hdr->xfeatures & XFEATURE_APX)) {
return nullptr;
}
return (apx_state*)(xsave + offset);
}
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
// Dump APX EGPRs (R16-R31)
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
if (apx != nullptr) {
for (int i = 0; i < 16; i++) {
st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
if (i % 4 == 3) st->cr();
}
}
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
const int register_count = 16;
if (context == nullptr) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
if (context == nullptr || n == register_count) {
if (n == register_count) {
return;
}
const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
if (n < 16) {
// Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
switch (n) {
CASE_PRINT_REG( 0, "RAX=", RAX); break;
CASE_PRINT_REG( 1, "RBX=", RBX); break;
CASE_PRINT_REG( 2, "RCX=", RCX); break;
CASE_PRINT_REG( 3, "RDX=", RDX); break;
CASE_PRINT_REG( 4, "RSP=", RSP); break;
CASE_PRINT_REG( 5, "RBP=", RBP); break;
CASE_PRINT_REG( 6, "RSI=", RSI); break;
CASE_PRINT_REG( 7, "RDI=", RDI); break;
CASE_PRINT_REG( 8, "R8 =", R8); break;
CASE_PRINT_REG( 9, "R9 =", R9); break;
CASE_PRINT_REG(10, "R10=", R10); break;
CASE_PRINT_REG(11, "R11=", R11); break;
CASE_PRINT_REG(12, "R12=", R12); break;
CASE_PRINT_REG(13, "R13=", R13); break;
CASE_PRINT_REG(14, "R14=", R14); break;
CASE_PRINT_REG(15, "R15=", R15); break;
}
# undef CASE_PRINT_REG
} else {
// APX extended general purpose registers (R16-R31)
st->print("R%d=", n);
print_location(st, apx->regs[n - 16]);
}
++n;
}
}
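The continuation parameter exists so the caller can resume after a secondary crash; a hypothetical driver sketch against HotSpot's headers (the real driver lives in the VM error reporter):

// 'continuation' is advanced before each print_location call, so if printing
// a register faults, re-entering with the saved value skips that register.
static void print_all_registers(outputStream* st, const void* context) {
  int continuation = 0;
  os::print_register_info(st, context, continuation);
}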

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -19,24 +19,16 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialException;
/**
* @test
* @bug 7077451
* @summary tests if the correct exception is thrown when calling method setCharacterStream() on SerialClob
*/
public class SetCharacterStream {
public static void main(String[] args) throws Exception {
SerialClob clob = new SerialClob(new char[0]);
try {
clob.setCharacterStream(0);
} catch (SerialException e) {
System.out.println("Test PASSED");
}
}
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/growableArray.hpp"
void AOTGrowableArrayHelper::deallocate(void* mem) {
if (!AOTMetaspace::in_aot_cache(mem)) {
GrowableArrayCHeapAllocator::deallocate(mem);
}
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
#define SHARE_AOT_AOTGROWABLEARRAY_HPP
#include <memory/metaspaceClosureType.hpp>
#include <utilities/growableArray.hpp>
class AOTGrowableArrayHelper {
public:
static void deallocate(void* mem);
};
// An AOTGrowableArray<T> provides the same functionality as a GrowableArray<T> that
// uses the C heap allocator. In addition, AOTGrowableArray<T> can be iterated with
// MetaspaceClosure. This type should be used for growable arrays that need to be
// stored in the AOT cache. See ModuleEntry::_reads for an example.
template <typename E>
class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
friend class VMStructs;
friend class GrowableArrayWithAllocator<E, AOTGrowableArray>;
static E* allocate(int max, MemTag mem_tag) {
return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
}
E* allocate() {
return allocate(this->_capacity, mtClass);
}
void deallocate(E* mem) {
#if INCLUDE_CDS
AOTGrowableArrayHelper::deallocate(mem);
#else
GrowableArrayCHeapAllocator::deallocate(mem);
#endif
}
public:
AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
GrowableArrayWithAllocator<E, AOTGrowableArray>(
allocate(initial_capacity, mem_tag),
initial_capacity) {}
AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
static bool is_read_only_by_default() { return false; }
};
#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
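A minimal usage sketch (hypothetical element type and tag; the real in-tree example is ModuleEntry::_reads, as noted above):

AOTGrowableArray<int> ids(4, mtClassShared);  // same API as a C-heap GrowableArray
ids.append(42);                               // but walkable by MetaspaceClosure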

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -19,24 +19,19 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialException;
/**
* @test
* @bug 7077451
* @summary tests if the correct exception is thrown when calling method setBinaryStream() on SerialBlob
*/
public class SetBinaryStream {
#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
public static void main(String[] args) throws Exception {
SerialBlob blob = new SerialBlob(new byte[0]);
try {
blob.setBinaryStream(0);
} catch (SerialException e) {
System.out.println("Test PASSED");
}
}
#include "cds/aotGrowableArray.hpp"
#include "memory/metaspaceClosure.hpp"
template <typename E>
void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}
#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
info._type = ref->msotype();
info._type = ref->type();
_objs.append(info);
}
@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->msotype();
info._type = src_info->type();
objs.append(info);
}
@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
MetaspaceObj::Type type = info._type;
const char* type_name = MetaspaceObj::type_name(type);
MetaspaceClosureType type = info._type;
const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
case MetaspaceObj::ClassType:
case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolType:
case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::ConstMethodType:
case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodType:
case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
case MetaspaceClosureType::ModuleEntryType:
log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::PackageEntryType:
log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::GrowableArrayType:
log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::KlassTrainingDataType:
case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodTrainingDataType:
case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::CompileTrainingDataType:
case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
mod->name_as_C_string());
}
void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
pkg->module()->name_as_C_string(), pkg->name_as_C_string());
}
void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
arr->length(), arr->capacity());
}
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
class GrowableArrayBase;
class KlassTrainingData;
class MethodCounters;
class MethodTrainingData;
class ModuleEntry;
class outputStream;
class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache and when using one.
@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
MetaspaceObj::Type _type;
MetaspaceClosureType _type;
};
public:
@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,10 +360,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
if (!CDSConfig::is_using_archive()) {
assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@ -466,7 +464,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
StringTable::load_shared_strings_array();
if (CDSConfig::is_dumping_final_static_archive()) {
StringTable::move_shared_strings_into_runtime_table();
}
}
}
@ -619,7 +619,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;

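The rewritten root_segment() check folds the old two-branch assert into a single guard: any caller that is not reading an archive must be the VM thread in the middle of a heap dump. A standalone sketch of that invariant, with plain bools standing in for the CDSConfig and thread queries:

#include <cassert>
#include <cstdio>

// Plain bools standing in for CDSConfig::is_using_archive(),
// CDSConfig::is_dumping_heap() and the VM-thread check.
static bool using_archive = false;
static bool dumping_heap  = true;
static bool on_vm_thread  = true;

static int root_segment(int idx) {
  if (!using_archive) {
    // Not reading an archive: must be the VM thread dumping the heap.
    assert(dumping_heap && on_vm_thread);
  }
  return idx;   // stand-in for resolving _root_segments->at(idx)
}

int main() {
  printf("segment %d\n", root_segment(0));
  return 0;
}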
View File

@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_roots(it);
}
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::remove_unshareable_info();
}
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@ -1097,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
assert(CDSConfig::allow_only_single_java_thread(), "Required");
if (!CDSConfig::is_dumping_preimage_static_archive()) {
// A single thread is required for Reference handling and a deterministic CDS archive.
// It's not required for dumping the preimage, where References won't be archived and
// determinism is not needed.
assert(CDSConfig::allow_only_single_java_thread(), "Required");
}
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@ -1135,6 +1147,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
@ -1154,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
if (HeapShared::is_writing_mapping_mode()) {
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
}
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
// We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
// is not running. Otherwise the checks for next/discovered may not work.
precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
// The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
// is not running.
//
// This code is called on every object found by AOTArtifactFinder. When dumping the
// preimage archive, AOTArtifactFinder should not find any Reference objects.
precond(!CDSConfig::is_dumping_preimage_static_archive());
precond(CDSConfig::allow_only_single_java_thread());
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);

View File

@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
} else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
if (ref->msotype() == MetaspaceObj::MethodType) {
if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
if (ref->msotype() == MetaspaceObj::MethodDataType) {
if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
} else if (ref->msotype() == MetaspaceObj::MethodDataType ||
ref->msotype() == MetaspaceObj::MethodCountersType ||
ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
} else if (ref->type() == MetaspaceClosureType::MethodDataType ||
ref->type() == MetaspaceClosureType::MethodCountersType ||
ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
} else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
} else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
if (ref->msotype() == MetaspaceObj::ClassType) {
if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@ -620,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = rw_region()->top();
ClassLoaderDataShared::allocate_archived_tables();
alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
}
#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@ -637,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_full_module_graph()) {
char* start = ro_region()->top();
ClassLoaderDataShared::init_archived_tables();
alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
}
#endif
RegeneratedClasses::record_regenerated_objects();
}
@ -663,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
if (src_info->msotype() == MetaspaceObj::ClassType) {
if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@ -679,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
} else if (src_info->msotype() == MetaspaceObj::SymbolType) {
} else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@ -689,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@ -704,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@ -714,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
_alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
_alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@ -997,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
info.msotype() == MetaspaceObj::MethodTrainingDataType ||
info.msotype() == MetaspaceObj::CompileTrainingDataType) {
if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
info.type() == MetaspaceClosureType::MethodTrainingDataType ||
info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodDataType) {
} else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
} else if (info.msotype() == MetaspaceObj::MethodCountersType) {
} else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}

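get_follow_mode() above decides, per metadata object, whether the archiver reuses it in place (point_to_it), copies it (make_a_copy), or nulls out references to it (set_to_null), now keyed by MetaspaceClosureType. A sketch of that decision shape under the same three modes; the config helpers (already_in_cache, need_training_data, dumping_adapters) are illustrative stand-ins:

#include <cstdio>

enum class ObjType { MethodData, MethodCounters, AdapterHandlerEntry, Class, Symbol };
enum FollowMode { point_to_it, make_a_copy, set_to_null };

// Illustrative stand-ins for the dump-time configuration queries.
static bool already_in_cache(const void*) { return false; }
static bool need_training_data()          { return true;  }
static bool dumping_adapters()            { return false; }

static FollowMode follow_mode(ObjType t, const void* obj) {
  if (already_in_cache(obj)) {
    return point_to_it;                 // don't dump existing shared metadata again
  }
  switch (t) {
  case ObjType::MethodData:
  case ObjType::MethodCounters:
    return need_training_data() ? make_a_copy : set_to_null;
  case ObjType::AdapterHandlerEntry:
    return dumping_adapters() ? make_a_copy : set_to_null;
  default:
    return make_a_copy;
  }
}

int main() {
  printf("%d %d\n", (int)follow_mode(ObjType::MethodData, nullptr),
                    (int)follow_mode(ObjType::AdapterHandlerEntry, nullptr));
  return 0;
}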
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
MetaspaceObj::Type _msotype;
MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object inside the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
_size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* regenerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(regenerated_obj_info->_follow_mode),
_size_in_bytes(0), _msotype(regenerated_obj_info->_msotype),
_size_in_bytes(0), _type(regenerated_obj_info->_type),
_source_addr(src), _buffered_addr(regenerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
MetaspaceObj::Type msotype() const { return _msotype; }
MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -556,7 +556,9 @@ void CDSConfig::check_aotmode_record() {
// At VM exit, the module graph may be contaminated with program states.
// We will rebuild the module graph when dumping the CDS final image.
disable_heap_dumping();
_is_using_optimized_module_handling = false;
_is_using_full_module_graph = false;
_is_dumping_full_module_graph = false;
}
void CDSConfig::check_aotmode_create() {
@ -582,6 +584,7 @@ void CDSConfig::check_aotmode_create() {
substitute_aot_filename(FLAG_MEMBER_ENUM(AOTCache));
_is_dumping_final_static_archive = true;
_is_using_full_module_graph = false;
UseSharedSpaces = true;
RequireSharedSpaces = true;
@ -954,7 +957,9 @@ bool CDSConfig::are_vm_options_incompatible_with_dumping_heap() {
}
bool CDSConfig::is_dumping_heap() {
if (!(is_dumping_classic_static_archive() || is_dumping_final_static_archive())
// Note: when dumping the preimage static archive, only a very limited set of oops
// is dumped.
if (!is_dumping_static_archive()
|| are_vm_options_incompatible_with_dumping_heap()
|| _disable_heap_dumping) {
return false;
@ -966,6 +971,26 @@ bool CDSConfig::is_loading_heap() {
return HeapShared::is_archived_heap_in_use();
}
bool CDSConfig::is_dumping_klass_subgraphs() {
if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
// KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
// has been superseded by AOT class linking. This feature is used only when
// AOT class linking is disabled.
//
// KlassSubGraphs are disabled in the preimage static archive, which contains a very
// limited set of oops.
return is_dumping_heap() && !is_dumping_aot_linked_classes();
} else {
return false;
}
}
bool CDSConfig::is_using_klass_subgraphs() {
return (is_loading_heap() &&
!CDSConfig::is_using_aot_linked_classes() &&
!CDSConfig::is_dumping_final_static_archive());
}
bool CDSConfig::is_using_full_module_graph() {
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
return true;

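is_dumping_klass_subgraphs() and is_using_klass_subgraphs() form a dump-time/run-time pair gating the legacy subgraph path, both conditioned on AOT class linking being off. A sketch of that symmetric gating, with plain flags in place of the real CDSConfig state:

#include <cstdio>

// Plain flags in place of the real CDSConfig state.
static bool dumping_static_archive = true;
static bool loading_heap           = false;
static bool aot_linked_classes     = false;

static bool is_dumping_klass_subgraphs() {
  // Legacy oop-archiving path: dump it only when AOT class linking is off.
  return dumping_static_archive && !aot_linked_classes;
}

static bool is_using_klass_subgraphs() {
  // Mirror image at run time: consume it only when AOT class linking is off.
  return loading_heap && !aot_linked_classes;
}

int main() {
  printf("dump=%d use=%d\n", is_dumping_klass_subgraphs(), is_using_klass_subgraphs());
  return 0;
}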
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -188,6 +188,9 @@ public:
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_using_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,14 @@
*
*/
#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
#ifndef PRODUCT
// AOTGrowableArray has a vtable only in non-product builds (due to
// the virtual printing functions in AnyObj).
using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
f(GrowableArray_ModuleEntry_ptr) \
#endif
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
f(CompileTrainingData)
f(CompileTrainingData) \
NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
class CppVtableInfo {
intptr_t _vtable_size;
@ -86,7 +102,7 @@ public:
}
};
static inline intptr_t* vtable_of(const Metadata* m) {
static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}
@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
ResourceMark rm;
T tmp; // Allocate a temporary dummy metadata object to get at the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
switch (msotype) {
case MetaspaceObj::SymbolType:
case MetaspaceObj::TypeArrayU1Type:
case MetaspaceObj::TypeArrayU2Type:
case MetaspaceObj::TypeArrayU4Type:
case MetaspaceObj::TypeArrayU8Type:
case MetaspaceObj::TypeArrayOtherType:
case MetaspaceObj::ConstMethodType:
case MetaspaceObj::ConstantPoolCacheType:
case MetaspaceObj::AnnotationsType:
case MetaspaceObj::RecordComponentType:
case MetaspaceObj::AdapterHandlerEntryType:
case MetaspaceObj::AdapterFingerPrintType:
switch (type) {
case MetaspaceClosureType::SymbolType:
case MetaspaceClosureType::TypeArrayU1Type:
case MetaspaceClosureType::TypeArrayU2Type:
case MetaspaceClosureType::TypeArrayU4Type:
case MetaspaceClosureType::TypeArrayU8Type:
case MetaspaceClosureType::TypeArrayOtherType:
case MetaspaceClosureType::CArrayType:
case MetaspaceClosureType::ConstMethodType:
case MetaspaceClosureType::ConstantPoolCacheType:
case MetaspaceClosureType::AnnotationsType:
case MetaspaceClosureType::ModuleEntryType:
case MetaspaceClosureType::PackageEntryType:
case MetaspaceClosureType::RecordComponentType:
case MetaspaceClosureType::AdapterHandlerEntryType:
case MetaspaceClosureType::AdapterFingerPrintType:
PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:

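vtable_of() above reads the vpointer out of the first word of an object, and the cloner constructs a throwaway instance purely to reach the original vtable. A standalone sketch of that trick, assuming the common Itanium-C++-ABI layout where a polymorphic object's first word is its vptr; this is implementation-defined behavior, best compiled without optimization so the final call is not devirtualized:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct Meta {
  virtual ~Meta() {}
  virtual const char* kind() const { return "Meta"; }
};

// Same trick as vtable_of() above: the first word of a polymorphic object
// points into its vtable's function-pointer region.
static intptr_t* vtable_of(const void* m) {
  return *((intptr_t**)m);
}

int main() {
  Meta tmp;                          // throwaway instance, only used to reach the vtable
  intptr_t cloned[3];                // enough slots here for the dtor pair + kind()
  memcpy(cloned, vtable_of(&tmp), sizeof(cloned));

  Meta m;
  *(intptr_t**)&m = cloned;          // patch the vptr to point at the clone
  const Meta* p = &m;
  printf("%s\n", p->kind());         // dispatch now goes through the cloned table
  return 0;
}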
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,32 +27,34 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
// Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
METASPACE_OBJ_TYPES_DO(f) \
#define DUMPED_OBJ_TYPES_DO(f) \
METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
f(ModulesNatives) \
f(CppVTables) \
f(Other)
#define DUMPED_TYPE_DECLARE(name) name ## Type,
#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
void record(MetaspaceClosureType type, int byte_size, bool read_only) {
int t = (int)type;
assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
_counts[which][type] ++;
_bytes [which][type] += byte_size;
}
void record_modules(int byte_size, bool read_only) {
int which = (read_only) ? RO : RW;
_bytes [which][ModulesNativesType] += byte_size;
_counts[which][t] ++;
_bytes [which][t] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {

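DUMPED_OBJ_TYPES_DO extends the METASPACE_CLOSURE_TYPES_DO list with statistics-only entries, then stamps out both the enum and the name table from the same list. A sketch of that X-macro pattern, with illustrative type names:

#include <cstdio>

// Base list plus stats-only extras, same shape as METASPACE_CLOSURE_TYPES_DO
// feeding DUMPED_OBJ_TYPES_DO (type names here are illustrative).
#define BASE_TYPES_DO(f)   f(Class) f(Symbol) f(Method)
#define DUMPED_TYPES_DO(f) BASE_TYPES_DO(f) f(CppVTables) f(Other)

#define DECLARE(name)   name##Type,
#define NAME_CASE(name) case name##Type: return #name;

enum Type { DUMPED_TYPES_DO(DECLARE) _number_of_types };

static const char* type_name(Type t) {
  switch (t) {
    DUMPED_TYPES_DO(NAME_CASE)
    default: return "?";
  }
}

int main() {
  for (int i = 0; i < _number_of_types; i++) {
    printf("%d -> %s\n", i, type_name((Type)i));
  }
  return 0;
}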
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -210,7 +210,7 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
@ -413,6 +413,8 @@ void HeapShared::materialize_thread_object() {
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
@ -453,7 +455,6 @@ int HeapShared::append_root(oop obj) {
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
oop result;
@ -598,8 +599,7 @@ public:
void set_oop(MetaspaceObj* ptr, oop o) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle handle(Universe::vm_global(), o);
bool is_new = put(ptr, handle);
assert(is_new, "cannot set twice");
put_when_absent(ptr, handle);
}
void remove_oop(MetaspaceObj* ptr) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
@ -612,6 +612,11 @@ public:
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
// We are in an AOT training run. The class has been redefined and we are giving it a new resolved_reference.
// Ignore it, as this class will be excluded from the AOT config.
return;
}
if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
_scratch_objects_table->set_oop(src, dest);
}
@ -831,14 +836,6 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
return nullptr;
}
void HeapShared::archive_strings() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
oop shared_strings_array = StringTable::init_shared_strings_array();
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
int HeapShared::archive_exception_instance(oop exception) {
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
assert(success, "sanity");
@ -890,7 +887,7 @@ void HeapShared::start_scanning_for_oops() {
void HeapShared::end_scanning_for_oops() {
if (is_writing_mapping_mode()) {
archive_strings();
StringTable::init_shared_table();
}
delete_seen_objects_table();
}
@ -940,7 +937,7 @@ void HeapShared::scan_java_class(Klass* orig_k) {
void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
@ -948,10 +945,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
if (CDSConfig::is_dumping_full_module_graph()) {
Modules::verify_archived_modules();
}
}
//
@ -1302,10 +1295,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (!is_archived_heap_in_use()) {
return; // nothing to do
}
if (!CDSConfig::is_using_aot_linked_classes()) {
if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
@ -1395,7 +1385,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
if (!is_archived_heap_in_use()) {
if (!CDSConfig::is_using_klass_subgraphs()) {
return; // nothing to do
}
@ -1871,7 +1861,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -1922,7 +1912,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
};
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
assert(CDSConfig::is_dumping_heap(), "dump time only");
precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@ -2148,7 +2138,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(CDSConfig::is_dumping_heap(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
if (!CDSConfig::is_dumping_aot_linked_classes()) {
if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);

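Two changes above make scratch-object registration idempotent: set_oop() switches from put() (which asserted the key was new) to put_when_absent(), and add_scratch_resolved_references() ignores re-registration for classes redefined during the training run. A sketch of first-mapping-wins registration, using std::map as a stand-in for the HotSpot table:

#include <cstdio>
#include <map>

// std::map as a stand-in for the HotSpot scratch-objects table.
static std::map<const void*, int> scratch;

// First mapping wins; later registrations are ignored, like put_when_absent().
static void set_scratch(const void* key, int handle) {
  scratch.emplace(key, handle);      // no-op if the key is already present
}

int main() {
  int k = 0;
  set_scratch(&k, 1);
  set_scratch(&k, 2);                // ignored
  printf("%d\n", scratch[&k]);       // prints 1
  return 0;
}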
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -478,7 +478,6 @@ private:
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
static void archive_strings();
static void archive_subgraphs();
static void copy_java_mirror(oop orig_mirror, oop scratch_m);

View File

@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->trust_final_fields()) {
// Explicit opt-in from system classes
return true;
}
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
return true;
}
return TrustFinalNonStaticFields;
}

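With the explicit opt-in in place, trust_final_non_static_fields() checks in a fixed order: annotation opt-in first, then the package allowlist, then the record case, then the global flag; that ordering is why the hard-coded Atomic*FieldUpdater name checks could be dropped. A sketch of the ordering; HolderInfo and global_trust are illustrative stand-ins for ciInstanceKlass and TrustFinalNonStaticFields:

#include <cstdio>
#include <cstring>

struct HolderInfo {                  // illustrative stand-in for ciInstanceKlass
  bool trust_final_fields;           // explicit opt-in (the new annotation bit)
  bool is_record;
  const char* package_name;
};

static bool global_trust = false;    // stand-in for TrustFinalNonStaticFields

static bool trust_final_non_static_fields(const HolderInfo* h) {
  if (h == nullptr) return false;
  if (h->trust_final_fields) return true;      // explicit opt-in checked first
  if (strcmp(h->package_name, "java/lang/invoke") == 0) return true;  // allowlist
  if (h->is_record) return true;
  return global_trust;                         // global flag decides last
}

int main() {
  HolderInfo updater = { true, false, "java/util/concurrent/atomic" };
  printf("%d\n", trust_final_non_static_fields(&updater));   // trusted via opt-in
  return 0;
}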
View File

@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
_trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily

View File

@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@ -207,6 +208,10 @@ public:
return _is_record;
}
bool trust_final_fields() const {
return _trust_final_fields;
}
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);

View File

@ -42,10 +42,7 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
CardTable::CardValue* ci_card_table_address() {
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");
return ct->byte_map_base();
CardTable::CardValue* ci_card_table_address_const() {
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
return ctbs->card_table_base_const();
}

View File

@ -51,9 +51,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
CardTable::CardValue* ci_card_table_address();
CardTable::CardValue* ci_card_table_address_const();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address());
return reinterpret_cast<T>(ci_card_table_address_const());
}
#endif // SHARE_CI_CIUTILITIES_HPP

View File

@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
_jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
if (_location != _in_class) break; // only allow for classes
if (!privileged) break; // only allow in privileged code
return _jdk_internal_vm_annotation_TrustFinalFields;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
ik->set_trust_final_fields(true);
}
}
#define MAX_ARGS_SIZE 255

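The collector admits the new annotation only when it appears on a class and only in privileged code, and apply_to() then sets the corresponding InstanceKlass bit. A sketch of that two-condition gate, with illustrative names:

#include <cstdio>

enum Location { in_class, in_field, in_method };

// Two-condition gate, mirroring the checks above (illustrative names).
static bool admit_trust_final_fields(Location loc, bool privileged) {
  if (loc != in_class) return false;   // only allowed on classes
  if (!privileged)     return false;   // only allowed in privileged code
  return true;
}

int main() {
  printf("%d %d\n", admit_trust_final_fields(in_class, true),
                    admit_trust_final_fields(in_field, true));
  return 0;
}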
View File

@ -1418,6 +1418,10 @@ char* ClassLoader::lookup_vm_options() {
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
JImage_file =(*JImageOpen)(modules_path, &error);
if (JImage_file == nullptr) {
if (Arguments::has_jimage()) {
// The modules file exists but is unreadable or corrupt
vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
}
return nullptr;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
void allocate(ClassLoaderData* loader_data);
void init_archived_entries(ClassLoaderData* loader_data);
void iterate_roots(MetaspaceClosure* closure);
void build_tables(ClassLoaderData* loader_data, TRAPS);
void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->iterate_symbols(closure);
loader_data->modules() ->iterate_symbols(closure);
loader_data->unnamed_module()->iterate_symbols(closure);
}
it->push(&_packages);
it->push(&_modules);
it->push(&_unnamed_module);
}
void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
_packages = loader_data->packages()->allocate_archived_entries();
_modules = loader_data->modules() ->allocate_archived_entries();
_unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
_packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
_modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
_unnamed_module = loader_data->unnamed_module();
}
}
void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
loader_data->packages()->init_archived_entries(_packages);
loader_data->modules() ->init_archived_entries(_modules);
_unnamed_module->init_as_archived_entry();
void ArchivedClassLoaderData::remove_unshareable_info() {
if (_packages != nullptr) {
_packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
for (int i = 0; i < _packages->length(); i++) {
_packages->at(i)->remove_unshareable_info();
}
}
if (_modules != nullptr) {
_modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
for (int i = 0; i < _modules->length(); i++) {
_modules->at(i)->remove_unshareable_info();
}
}
if (_unnamed_module != nullptr) {
_unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
_unnamed_module->remove_unshareable_info();
}
}
@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers to load the class-loader-related objects before
// the CLD constructor, which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
#endif
}
static ClassLoaderData* null_class_loader_data() {
@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
_archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
_archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
_archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
_archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
_archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}
void ClassLoaderDataShared::allocate_archived_tables() {
void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.allocate (null_class_loader_data());
_archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
_archived_system_loader_data.allocate (java_system_loader_data_or_null());
_archived_boot_loader_data.iterate_roots(it);
_archived_platform_loader_data.iterate_roots(it);
_archived_system_loader_data.iterate_roots(it);
}
void ClassLoaderDataShared::init_archived_tables() {
void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
_archived_boot_loader_data.remove_unshareable_info();
_archived_platform_loader_data.remove_unshareable_info();
_archived_system_loader_data.remove_unshareable_info();
_archived_boot_loader_data.init_archived_entries (null_class_loader_data());
_archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
_archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
_archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
_archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();

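iterate_roots() now hands the archiver the addresses of the root fields themselves (it->push(&_packages) and friends), so one visitor both discovers the objects and records the slots for later pointer relocation. A sketch of that slot-visiting shape; Closure and Table are illustrative types, not the MetaspaceClosure API:

#include <cstdio>

struct Table { int len; };

// Illustrative closure: visits the locations of root pointers, so the
// archiver can both follow the objects and later relocate the slots.
struct Closure {
  void push(Table** slot) {
    printf("slot %p -> obj %p (len %d)\n", (void*)slot, (void*)*slot, (*slot)->len);
  }
};

static Table pkgs = { 3 };
static Table mods = { 5 };
static Table* _packages = &pkgs;
static Table* _modules  = &mods;

static void iterate_roots(Closure* it) {
  it->push(&_packages);
  it->push(&_modules);
}

int main() {
  Closure c;
  iterate_roots(&c);
  return 0;
}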
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
static void allocate_archived_tables();
static void iterate_symbols(MetaspaceClosure* closure);
static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();

View File

@ -1263,6 +1263,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
"Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
}
if (CDSConfig::is_dumping_heap()) {
create_scratch_mirror(k, CHECK_(false));
}
return true;
}
#endif // INCLUDE_CDS_JAVA_HEAP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "cds/aotClassLocation.hpp"
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@ -37,6 +38,7 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@ -44,7 +46,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}
@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
_is_patched(false)
DEBUG_ONLY(COMMA _reads_is_archived(false)) {
_is_patched(false) {
// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}
void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_reads);
it->push(&_version);
it->push(&_location);
}
#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const ModuleEntry*,
ModuleEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedModuleEntries;
static ArchivedModuleEntries* _archive_modules_entries = nullptr;
#ifndef PRODUCT
static int _num_archived_module_entries = 0;
static int _num_inited_module_entries = 0;
#endif
bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}
ModuleEntry* ModuleEntry::allocate_archived_entry() const {
precond(should_be_archived());
precond(CDSConfig::is_dumping_full_module_graph());
ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
void ModuleEntry::remove_unshareable_info() {
_archived_module_index = HeapShared::append_root(module_oop());
archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
if (_archive_modules_entries == nullptr) {
_archive_modules_entries = new (mtClass)ArchivedModuleEntries();
}
assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
_archive_modules_entries->put(this, archived_entry);
DEBUG_ONLY(_num_archived_module_entries++);
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
archived_entry->_shared_pd = null_handle;
} else {
assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
if (_reads != nullptr) {
_reads->set_in_aot_cache();
}
// Clear handles and restore at run time. Handles cannot be archived.
if (CDSConfig::is_dumping_final_static_archive()) {
OopHandle null_handle;
_shared_pd = null_handle;
} else {
assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
}
OopHandle null_handle;
archived_entry->_module_handle = null_handle;
// For verify_archived_module_entries()
DEBUG_ONLY(_num_inited_module_entries++);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
archived_entry->print(&ls);
}
return archived_entry;
}
bool ModuleEntry::has_been_archived() {
assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
return _archive_modules_entries->contains(this);
}
ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
return *ptr;
}
// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
// Write it out as an Array, and convert it back to GrowableArray at runtime.
Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
Array<ModuleEntry*>* archived_array = nullptr;
int length = (array == nullptr) ? 0 : array->length();
if (length > 0) {
archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
}
}
return archived_array;
}
GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
GrowableArray<ModuleEntry*>* array = nullptr;
int length = (archived_array == nullptr) ? 0 : archived_array->length();
if (length > 0) {
array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = archived_array->at(i);
array->append(archived_entry);
}
}
return array;
}
void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
closure->push(&_version);
closure->push(&_location);
}
void ModuleEntry::init_as_archived_entry() {
set_archived_reads(write_growable_array(reads()));
_module_handle = null_handle;
_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
_name = ArchiveBuilder::get_buffered_symbol(_name);
ArchivePtrMarker::mark_pointer((address*)&_name);
Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
_shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
if (_version != nullptr) {
_version = ArchiveBuilder::get_buffered_symbol(_version);
}
if (_location != nullptr) {
_location = ArchiveBuilder::get_buffered_symbol(_location);
}
JFR_ONLY(set_trace_id(0);) // re-init at runtime
ArchivePtrMarker::mark_pointer((address*)&_reads);
ArchivePtrMarker::mark_pointer((address*)&_version);
ArchivePtrMarker::mark_pointer((address*)&_location);
}
#ifndef PRODUCT
void ModuleEntry::verify_archived_module_entries() {
assert(_num_archived_module_entries == _num_inited_module_entries,
"%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
_num_archived_module_entries, _num_inited_module_entries);
}
#endif // PRODUCT
void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}
@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}
void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
m->iterate_symbols(closure);
};
_table.iterate_all(syms);
}
Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
Array<ModuleEntry*>* aot_table =
MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
archived_modules->at_put(n++, m);
m->pack_reads();
aot_table->at_put(n++, m);
if (log_is_enabled(Info, aot, module)) {
ResourceMark rm;
LogStream ls(Log(aot, module)::info());
ls.print("Stored in archive: ");
m->print(&ls);
}
};
_table.iterate_all(grab);
if (n > 1) {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
QuickSort::sort(aot_table->data(), n, compare_module_by_name);
}
for (int i = 0; i < n; i++) {
archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
}
return archived_modules;
}
void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
for (int i = 0; i < archived_modules->length(); i++) {
ModuleEntry* archived_entry = archived_modules->at(i);
archived_entry->init_as_archived_entry();
}
return aot_table;
}
void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,

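The write_growable_array()/restore_growable_array() round trip is gone: an AOTGrowableArray is archived directly, and pack_reads() calls shrink_to_fit() so no unused capacity is written into the archive. A sketch of the packing step, with a toy growable array assumed to behave like AOTGrowableArray at this level:

#include <cstdio>
#include <cstdlib>

// Toy growable array, assumed to behave like AOTGrowableArray at this level.
struct Grow {
  int* data; int len; int cap;
  void append(int v) {
    if (len == cap) {
      cap = cap ? 2 * cap : 4;                    // geometric growth leaves slack
      data = (int*)realloc(data, cap * sizeof(int));
    }
    data[len++] = v;
  }
  void shrink_to_fit() {                          // the pack_reads() analogue
    if (len < cap) {
      data = (int*)realloc(data, len * sizeof(int));
      cap = len;
    }
  }
};

int main() {
  Grow g = { nullptr, 0, 0 };
  for (int i = 0; i < 5; i++) g.append(i);
  printf("before: len=%d cap=%d\n", g.len, g.cap); // cap 8: 3 wasted slots
  g.shrink_to_fit();
  printf("after:  len=%d cap=%d\n", g.len, g.cap); // cap == len: no slack archived
  free(g.data);
  return 0;
}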
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP
#include "cds/aotGrowableArray.hpp"
#include "jni.h"
#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@ -68,11 +70,8 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
union {
GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
};
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@ -81,7 +80,6 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@ -120,22 +118,18 @@ public:
bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
GrowableArray<ModuleEntry*>* reads() const {
assert(!_reads_is_archived, "sanity");
AOTGrowableArray<ModuleEntry*>* reads() const {
return _reads;
}
void set_reads(GrowableArray<ModuleEntry*>* r) {
void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
_reads = r;
DEBUG_ONLY(_reads_is_archived = false);
}
Array<ModuleEntry*>* archived_reads() const {
assert(_reads_is_archived, "sanity");
return _archived_reads;
}
void set_archived_reads(Array<ModuleEntry*>* r) {
_archived_reads = r;
DEBUG_ONLY(_reads_is_archived = true);
void pack_reads() {
if (_reads != nullptr) {
_reads->shrink_to_fit();
}
}
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
@ -189,6 +183,13 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
static bool is_read_only_by_default() { return false; }
void print(outputStream* st = tty) const;
void verify();
@ -198,18 +199,11 @@ public:
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void iterate_symbols(MetaspaceClosure* closure);
ModuleEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
bool has_been_archived();
static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
void remove_unshareable_info();
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};
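The metaspace_pointers_do()/size_in_heapwords()/type() group added inside this class is the contract the archiver's object walker relies on: each archivable type reports its size and enumerates its embedded pointer fields. A simplified sketch of that contract (the real MetaspaceClosure API is richer than this):

#include <cstddef>

struct Closure {
  virtual void push(void** slot) = 0;   // record one embedded pointer field
  virtual ~Closure() = default;
};

struct Archivable {
  void* _name;
  void* _reads;

  // Analogue of metaspace_pointers_do(): hand every pointer field to the
  // walker so it can be followed now and patched to its archived address later.
  void pointers_do(Closure* it) {
    it->push(&_name);
    it->push(&_reads);
  }
  size_t size_in_bytes() const { return sizeof(*this); }  // analogue of size_in_heapwords()
};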
@ -275,9 +269,7 @@ public:
void verify();
#if INCLUDE_CDS_JAVA_HEAP
void iterate_symbols(MetaspaceClosure* closure);
Array<ModuleEntry*>* allocate_archived_entries();
void init_archived_entries(Array<ModuleEntry*>* archived_modules);
Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");
if (orig_module_ent->name() != nullptr) {
// For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
assert(orig_module_ent->has_been_archived(), "sanity");
} else {
precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
if (orig_module_ent->name() == nullptr) {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
precond(orig_module_ent->has_been_archived());
if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@ -529,10 +526,6 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}
void Modules::verify_archived_modules() {
ModuleEntry::verify_archived_module_entries();
}
class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,6 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,6 +22,8 @@
*
*/
#include "cds/aotGrowableArray.inline.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@ -31,13 +33,13 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@ -51,7 +53,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
_qualified_exports(nullptr),
_defined_by_cds_in_class_path(0)
{
// name can't be null
// name can't be null -- a class in the default package gets a PackageEntry of nullptr.
_name->increment_refcount();
JFR_ONLY(INIT_ID(this);)
@ -81,7 +83,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
_qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
_qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
}
// Determine, based on this newly established export to module m,
@ -183,12 +185,24 @@ void PackageEntry::purge_qualified_exports() {
}
void PackageEntry::delete_qualified_exports() {
if (_qualified_exports != nullptr) {
if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
delete _qualified_exports;
}
_qualified_exports = nullptr;
}
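The new null-plus-range check encodes an ownership rule: _qualified_exports may now point either at a C-heap array the entry owns or at data mapped in from the AOT cache, and only the former may be freed. A small sketch of the rule, with in_mapped_region() as a hypothetical stand-in for AOTMetaspace::in_aot_cache():

bool in_mapped_region(const void* p);   // assumed: tests the mapped AOT address range

template <typename T>
void delete_unless_archived(T*& p) {
  if (p != nullptr && !in_mapped_region(p)) {
    delete p;            // C-heap allocation: this entry owns it
  }
  p = nullptr;           // archive-mapped storage is simply unreferenced
}

// Stub body so the sketch stands alone; the real predicate checks addresses.
bool in_mapped_region(const void* p) { (void)p; return false; }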
void PackageEntry::pack_qualified_exports() {
if (_qualified_exports != nullptr) {
_qualified_exports->shrink_to_fit();
}
}
void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_module);
it->push(&_qualified_exports);
}
PackageEntryTable::PackageEntryTable() { }
PackageEntryTable::~PackageEntryTable() {
@ -212,66 +226,19 @@ PackageEntryTable::~PackageEntryTable() {
}
#if INCLUDE_CDS_JAVA_HEAP
typedef HashTable<
const PackageEntry*,
PackageEntry*,
557, // prime number
AnyObj::C_HEAP> ArchivedPackageEntries;
static ArchivedPackageEntries* _archived_packages_entries = nullptr;
bool PackageEntry::should_be_archived() const {
return module()->should_be_archived();
}
PackageEntry* PackageEntry::allocate_archived_entry() const {
precond(should_be_archived());
PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
if (_archived_packages_entries == nullptr) {
_archived_packages_entries = new (mtClass)ArchivedPackageEntries();
void PackageEntry::remove_unshareable_info() {
if (_qualified_exports != nullptr) {
_qualified_exports->set_in_aot_cache();
}
assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
_archived_packages_entries->put(this, archived_entry);
return archived_entry;
}
PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
if (ptr != nullptr) {
return *ptr;
} else {
return nullptr;
}
}
void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
closure->push(&_name);
}
void PackageEntry::init_as_archived_entry() {
Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
_name = ArchiveBuilder::get_buffered_symbol(_name);
_module = ModuleEntry::get_archived_entry(_module);
_qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
_defined_by_cds_in_class_path = 0;
JFR_ONLY(set_trace_id(0);) // re-init at runtime
ArchivePtrMarker::mark_pointer((address*)&_name);
ArchivePtrMarker::mark_pointer((address*)&_module);
ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
LogStreamHandle(Info, aot, package) st;
if (st.is_enabled()) {
st.print("archived ");
print(&st);
}
}
void PackageEntry::load_from_archive() {
_qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
JFR_ONLY(INIT_ID(this);)
}
@ -280,14 +247,7 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
return a->name()->fast_compare(b->name());
}
void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
p->iterate_symbols(closure);
};
_table.iterate_all(syms);
}
Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
// First count the packages in named modules
int n = 0;
auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
@ -297,12 +257,19 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
};
_table.iterate_all(count);
Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
// reset n
n = 0;
auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
if (p->should_be_archived()) {
p->pack_qualified_exports();
archived_packages->at_put(n++, p);
LogStreamHandle(Info, aot, package) st;
if (st.is_enabled()) {
st.print("archived ");
p->print(&st);
}
}
};
_table.iterate_all(grab);
@ -311,18 +278,8 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
}
for (int i = 0; i < n; i++) {
archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
}
return archived_packages;
}
void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
for (int i = 0; i < archived_packages->length(); i++) {
PackageEntry* archived_entry = archived_packages->at(i);
archived_entry->init_as_archived_entry();
}
return archived_packages;
}
void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
#define SHARE_CLASSFILE_PACKAGEENTRY_HPP
#include "cds/aotGrowableArray.hpp"
#include "classfile/moduleEntry.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
#include "runtime/atomicAccess.hpp"
@ -114,7 +116,7 @@ private:
bool _must_walk_exports;
// Contains list of modules this package is qualifiedly exported to. Access
// to this list is protected by the Module_lock.
GrowableArray<ModuleEntry*>* _qualified_exports;
AOTGrowableArray<ModuleEntry*>* _qualified_exports;
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
// Initial size of a package entry's list of qualified exports.
@ -205,14 +207,24 @@ public:
void purge_qualified_exports();
void delete_qualified_exports();
void pack_qualified_exports(); // used by AOT
// methods required by MetaspaceClosure
void metaspace_pointers_do(MetaspaceClosure* it);
int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
static bool is_read_only_by_default() { return false; }
void print(outputStream* st = tty);
char* name_as_C_string() const {
assert(_name != nullptr, "name can't be null");
return name()->as_C_string();
}
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
void iterate_symbols(MetaspaceClosure* closure);
PackageEntry* allocate_archived_entry() const;
void init_as_archived_entry();
static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
void remove_unshareable_info();
void load_from_archive();
#endif
@ -271,9 +283,7 @@ public:
void print(outputStream* st = tty);
#if INCLUDE_CDS_JAVA_HEAP
void iterate_symbols(MetaspaceClosure* closure);
Array<PackageEntry*>* allocate_archived_entries();
void init_archived_entries(Array<PackageEntry*>* archived_packages);
Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(Array<PackageEntry*>* archived_packages);
#endif
};

View File

@ -74,24 +74,9 @@ const size_t REHASH_LEN = 100;
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;
#if INCLUDE_CDS_JAVA_HEAP
bool StringTable::_is_two_dimensional_shared_strings_array = false;
OopHandle StringTable::_shared_strings_array;
int StringTable::_shared_strings_array_root_index;
inline oop StringTable::read_string_from_compact_hashtable(address base_address, u4 index) {
assert(AOTMappedHeapLoader::is_in_use(), "sanity");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
oop s;
if (!_is_two_dimensional_shared_strings_array) {
s = array->obj_at((int)index);
} else {
int primary_index = index >> _secondary_array_index_bits;
int secondary_index = index & _secondary_array_index_mask;
objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
s = secondary->obj_at(secondary_index);
}
oop s = HeapShared::get_root((int)index, false);
assert(java_lang_String::is_instance(s), "must be");
return s;
}
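The replacement line collapses the lookup to a single HeapShared root index. The deleted scheme packed two coordinates into one 32-bit index: the low 14 bits picked a slot within a secondary array and the high bits picked which secondary array. A worked decode using the constants from the removed code:

#include <cstdint>

constexpr int      kSecondaryBits = 14;                       // from the removed code
constexpr uint32_t kSecondaryMask = (1u << kSecondaryBits) - 1;

struct Split { uint32_t primary; uint32_t secondary; };

constexpr Split decode(uint32_t index) {
  return { index >> kSecondaryBits, index & kSecondaryMask };
}

static_assert(decode(16383).primary == 0 && decode(16383).secondary == 16383,
              "last slot of the first secondary array");
static_assert(decode(16384).primary == 1 && decode(16384).secondary == 0,
              "first slot of the second secondary array");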
@ -115,7 +100,6 @@ OopStorage* StringTable::_oop_storage;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);
volatile bool _alt_hash = false;
@ -317,12 +301,6 @@ void StringTable::create_table() {
_oop_storage->register_num_dead_callback(&gc_notification);
}
#if INCLUDE_CDS_JAVA_HEAP
void StringTable::load_shared_strings_array() {
_shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
}
#endif
void StringTable::item_added() {
AtomicAccess::inc(&_items_count);
}
@ -509,9 +487,6 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!AtomicAccess::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@ -957,118 +932,13 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}
// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
void StringTable::allocate_shared_strings_array(TRAPS) {
if (!CDSConfig::is_dumping_heap()) {
return;
}
void StringTable::init_shared_table() {
assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
precond(CDSConfig::is_dumping_heap());
assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
CompileBroker::wait_for_no_active_tasks();
precond(CDSConfig::allow_only_single_java_thread());
// At this point, no more strings will be added:
// - There's only a single Java thread (this thread). It no longer executes Java bytecodes
// so JIT compilation will eventually stop.
// - CompileBroker has no more active tasks, so all JIT requests have been processed.
// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, true));
if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
}
int total = (int)items_count_acquire();
size_t single_array_size = objArrayOopDesc::object_size(total);
log_info(aot)("allocated string table for %d strings", total);
if (!HeapShared::is_too_large_to_archive(single_array_size)) {
// The entire table can fit in a single array
objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), total, CHECK);
_shared_strings_array = OopHandle(Universe::vm_global(), array);
log_info(aot)("string table array (single level) length = %d", total);
} else {
// Split the table in two levels of arrays.
int primary_array_length = (total + _secondary_array_max_length - 1) / _secondary_array_max_length;
size_t primary_array_size = objArrayOopDesc::object_size(primary_array_length);
size_t secondary_array_size = objArrayOopDesc::object_size(_secondary_array_max_length);
if (HeapShared::is_too_large_to_archive(secondary_array_size)) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = 268M interned strings! Not a practical concern
// but bail out for safety.
log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
AOTMetaspace::unrecoverable_writing_error();
}
objArrayOop primary = oopFactory::new_objArray(vmClasses::Object_klass(), primary_array_length, CHECK);
objArrayHandle primaryHandle(THREAD, primary);
_shared_strings_array = OopHandle(Universe::vm_global(), primary);
log_info(aot)("string table array (primary) length = %d", primary_array_length);
for (int i = 0; i < primary_array_length; i++) {
int len;
if (total > _secondary_array_max_length) {
len = _secondary_array_max_length;
} else {
len = total;
}
total -= len;
objArrayOop secondary = oopFactory::new_objArray(vmClasses::Object_klass(), len, CHECK);
primaryHandle()->obj_at_put(i, secondary);
log_info(aot)("string table array (secondary)[%d] length = %d", i, len);
assert(!HeapShared::is_too_large_to_archive(secondary), "sanity");
}
assert(total == 0, "must be");
_is_two_dimensional_shared_strings_array = true;
}
}
#ifndef PRODUCT
void StringTable::verify_secondary_array_index_bits() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
int max;
for (max = 1; ; max++) {
size_t next_size = objArrayOopDesc::object_size(1 << (max + 1));
if (HeapShared::is_too_large_to_archive(next_size)) {
break;
}
}
// Currently max is 17 for +UseCompressedOops, 16 for -UseCompressedOops.
// When we add support for Shenandoah (which has a smaller minimum region size than G1),
// max will become 15/14.
//
// We use _secondary_array_index_bits==14 as that will be the eventual value, and will
// make testing easier.
assert(_secondary_array_index_bits <= max,
"_secondary_array_index_bits (%d) must be smaller than max possible value (%d)",
_secondary_array_index_bits, max);
}
#endif // PRODUCT
// This is called AFTER we enter the CDS safepoint.
//
// For each shared string:
// [1] Store it into _shared_strings_array. Encode its position as a 32-bit index.
// [2] Store the index and hashcode into _shared_table.
oop StringTable::init_shared_strings_array() {
assert(CDSConfig::is_dumping_heap(), "must be");
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
verify_secondary_array_index_bits();
int index = 0;
auto copy_into_array = [&] (WeakHandle* val) {
int n = 0;
auto copy_into_aot_heap = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
// If string is too large, don't put it into the string table.
@ -1077,53 +947,34 @@ oop StringTable::init_shared_strings_array() {
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
if (!_is_two_dimensional_shared_strings_array) {
assert(index < array->length(), "no strings should have been added");
array->obj_at_put(index, string);
} else {
int primary_index = index >> _secondary_array_index_bits;
int secondary_index = index & _secondary_array_index_mask;
assert(primary_index < array->length(), "no strings should have been added");
objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
assert(secondary != nullptr && secondary->is_objArray(), "must be");
assert(secondary_index < secondary->length(), "no strings should have been added");
secondary->obj_at_put(secondary_index, string);
}
index ++;
}
n++;
return true;
};
_local_table->do_safepoint_scan(copy_into_array);
log_info(aot)("Archived %d interned strings", index);
return array;
_local_table->do_safepoint_scan(copy_into_aot_heap);
log_info(aot)("Archived %d interned strings", n);
};
void StringTable::write_shared_table() {
assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
precond(CDSConfig::is_dumping_heap());
assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
_shared_table.reset();
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
unsigned int hash = java_lang_String::hash_code(string);
writer.add(hash, index);
index ++;
int root_id = HeapShared::append_root(string);
writer.add(hash, root_id);
}
return true;
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, false));
}
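With the private strings array gone, each archived string is registered once as a heap-archive root and the compact table pairs its hash with that root id. A sketch of the indirection under those assumptions (types here are illustrative, not HotSpot's):

#include <cstdint>
#include <utility>
#include <vector>

static std::vector<const void*> roots;                    // stand-in for HeapShared's root list
static std::vector<std::pair<uint32_t, int>> table;       // (hash, root_id) entries

int append_root(const void* s) {                          // assumed: returns a stable index
  roots.push_back(s);
  return (int)roots.size() - 1;
}

void add_string(uint32_t hash, const void* s) {
  table.emplace_back(hash, append_root(s));               // lookup later resolves the id
}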
void StringTable::set_shared_strings_array_index(int root_index) {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
_shared_strings_array_root_index = root_index;
}
void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
@ -1135,8 +986,27 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
} else if (!AOTMappedHeapLoader::is_in_use()) {
_shared_table.reset();
}
}
soc->do_bool(&_is_two_dimensional_shared_strings_array);
soc->do_int(&_shared_strings_array_root_index);
void StringTable::move_shared_strings_into_runtime_table() {
precond(CDSConfig::is_dumping_final_static_archive());
JavaThread* THREAD = JavaThread::current();
HandleMark hm(THREAD);
int n = 0;
_shared_table.iterate_all([&](oop string) {
int length = java_lang_String::length(string);
Handle h_string (THREAD, string);
StringWrapper name(h_string, length);
unsigned int hash = hash_wrapped_string(name);
assert(!_alt_hash, "too early");
oop interned = do_intern(name, hash, THREAD);
assert(string == interned, "must be");
n++;
});
_shared_table.reset();
log_info(aot)("Moved %d interned strings to runtime table", n);
}
#endif //INCLUDE_CDS_JAVA_HEAP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,49 +109,17 @@ public:
static bool needs_rehashing() { return _needs_rehashing; }
static inline void update_needs_rehash(bool rehash);
// Sharing
#if INCLUDE_CDS_JAVA_HEAP
static inline oop read_string_from_compact_hashtable(address base_address, u4 index);
// AOT support
static inline oop read_string_from_compact_hashtable(address base_address, u4 index) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
private:
static bool _is_two_dimensional_shared_strings_array;
static OopHandle _shared_strings_array;
static int _shared_strings_array_root_index;
// All the shared strings are referenced through _shared_strings_array to keep them alive.
// Each shared string is stored as a 32-bit index in ::_shared_table. The index
// is interpreted in two ways:
//
// [1] _is_two_dimensional_shared_strings_array = false: _shared_strings_array is an Object[].
// Each shared string is stored as _shared_strings_array[index]
//
// [2] _is_two_dimensional_shared_strings_array = true: _shared_strings_array is an Object[][]
// This happens when there are too many elements in the shared table. We store them
// using two levels of objArrays, such that none of the arrays are too big for
// AOTMappedHeapWriter::is_too_large_to_archive(). In this case, the index is split into two
// parts. Each shared string is stored as _shared_strings_array[primary_index][secondary_index]:
//
// [bits 31 .. 14][ bits 13 .. 0 ]
// primary_index secondary_index
const static int _secondary_array_index_bits = 14;
const static int _secondary_array_max_length = 1 << _secondary_array_index_bits;
const static int _secondary_array_index_mask = _secondary_array_max_length - 1;
// make sure _secondary_array_index_bits is not too big
static void verify_secondary_array_index_bits() PRODUCT_RETURN;
#endif // INCLUDE_CDS_JAVA_HEAP
private:
static oop lookup_shared(const StringWrapper& name, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
public:
static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void load_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN;
static oop init_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void init_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void write_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
static void move_shared_strings_into_runtime_table();
// Jcmd
static void dump(outputStream* st, bool verbose=false);

View File

@ -190,9 +190,8 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
// effect (sic!) for external_name(), but instead of doing that, we opt to
// explicitly push the hashcode in here. This is to signify the following block
// is IMPORTANT:
if (klass->java_mirror() != nullptr) {
klass->java_mirror()->identity_hash();
}
assert(klass->java_mirror() != nullptr, "must be");
klass->java_mirror()->identity_hash();
if (!is_eligible_for_verification(klass, should_verify_class)) {
return true;

View File

@ -245,10 +245,6 @@ class SerializeClosure;
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@ -302,6 +298,7 @@ class SerializeClosure;
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1124,7 +1124,7 @@ class AbstractClassHierarchyWalker {
Klass* find_witness(InstanceKlass* context_type, KlassDepChange* changes = nullptr);
static void init();
static void print_statistics();
NOT_PRODUCT(static void print_statistics();)
};
PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_anywhere_calls_count = nullptr;
@ -2277,6 +2277,7 @@ bool KlassDepChange::involves_context(Klass* k) {
return is_contained;
}
#ifndef PRODUCT
void Dependencies::print_statistics() {
AbstractClassHierarchyWalker::print_statistics();
}
@ -2302,6 +2303,7 @@ void AbstractClassHierarchyWalker::print_statistics() {
}
}
}
#endif
CallSiteDepChange::CallSiteDepChange(Handle call_site, Handle method_handle) :
_call_site(call_site),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -649,7 +649,7 @@ class Dependencies: public ResourceObj {
};
friend class Dependencies::DepStream;
static void print_statistics();
NOT_PRODUCT(static void print_statistics();)
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2346,12 +2346,18 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
/* Repeat compilation without installing code for profiling purposes */
int repeat_compilation_count = directive->RepeatCompilationOption;
while (repeat_compilation_count > 0) {
ResourceMark rm(thread);
task->print_ul("NO CODE INSTALLED");
thread->timeout()->reset();
comp->compile_method(&ci_env, target, osr_bci, false, directive);
repeat_compilation_count--;
if (repeat_compilation_count > 0) {
CHeapStringHolder failure_reason;
failure_reason.set(ci_env._failure_reason.get());
while (repeat_compilation_count > 0) {
ResourceMark rm(thread);
task->print_ul("NO CODE INSTALLED");
thread->timeout()->reset();
ci_env._failure_reason.clear();
comp->compile_method(&ci_env, target, osr_bci, false, directive);
repeat_compilation_count--;
}
ci_env._failure_reason.set(failure_reason.get());
}
}
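The fix wraps the repeat-compilation loop in a save/clear/restore of the environment's failure reason, so "profiling only" reruns start clean but the originally reported outcome survives. The pattern in isolation, as a minimal sketch:

#include <string>

struct Env { std::string failure_reason; };

void repeat_for_profiling(Env& env, int repeats, void (*compile)(Env&)) {
  const std::string saved = env.failure_reason;   // keep the first attempt's outcome
  while (repeats-- > 0) {
    env.failure_reason.clear();                   // each rerun starts clean
    compile(env);                                 // result is discarded, code not installed
  }
  env.failure_reason = saved;                     // report the original outcome
}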

View File

@ -607,10 +607,27 @@ void decode_env::print_address(address adr) {
return;
}
address card_table_base = nullptr;
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->is_a(BarrierSet::CardTableBarrierSet) &&
adr == ci_card_table_address_as<address>()) {
st->print("word_map_base");
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
G1BarrierSet* g1bs = barrier_set_cast<G1BarrierSet>(bs);
card_table_base = g1bs->card_table()->byte_map_base();
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
ShenandoahBarrierSet* sbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
if (sbs->card_table() != nullptr) {
card_table_base = sbs->card_table()->byte_map_base();
}
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
card_table_base = ci_card_table_address_as<address>();
}
if (card_table_base != nullptr && adr == card_table_base) {
st->print("card_table_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
return;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,13 +64,13 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table,
{}
G1BarrierSet::~G1BarrierSet() {
delete _refinement_table;
delete refinement_table();
}
void G1BarrierSet::swap_global_card_table() {
G1CardTable* temp = static_cast<G1CardTable*>(_card_table);
_card_table = _refinement_table;
_refinement_table = temp;
G1CardTable* temp = static_cast<G1CardTable*>(card_table());
_card_table.store_relaxed(refinement_table());
_refinement_table.store_relaxed(temp);
}
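Both table fields become atomics so concurrent readers never observe a torn pointer, while the swap itself stays relaxed; per the comments elsewhere in this file, the subsequent handshake with all threads supplies the ordering. A minimal sketch of that choice in std::atomic terms:

#include <atomic>

struct Tables {
  std::atomic<int*> card;
  std::atomic<int*> refinement;

  void swap_relaxed() {
    // Relaxed is enough for the swap itself; a later handshake with every
    // thread establishes ordering before the tables are used in their new roles.
    int* t = card.load(std::memory_order_relaxed);
    card.store(refinement.load(std::memory_order_relaxed), std::memory_order_relaxed);
    refinement.store(t, std::memory_order_relaxed);
  }
};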
void G1BarrierSet::update_card_table_base(Thread* thread) {
@ -80,7 +80,7 @@ void G1BarrierSet::update_card_table_base(Thread* thread) {
assert(thread->is_Java_thread(), "may only update card table base of JavaThreads, not %s", thread->name());
}
#endif
G1ThreadLocalData::set_byte_map_base(thread, _card_table->byte_map_base());
G1ThreadLocalData::set_byte_map_base(thread, card_table()->byte_map_base());
}
template <class T> void
@ -135,10 +135,10 @@ void G1BarrierSet::write_region(MemRegion mr) {
// marks next time.
// If we write to the old card table (after the switching, then the refinement
// table) the oncoming handshake will do the memory synchronization.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardTable* local_card_table = card_table();
volatile CardValue* byte = card_table->byte_for(mr.start());
CardValue* last_byte = card_table->byte_for(mr.last());
volatile CardValue* byte = local_card_table->byte_for(mr.start());
CardValue* last_byte = local_card_table->byte_for(mr.last());
// Dirty cards only if necessary.
for (; byte <= last_byte; byte++) {
@ -190,6 +190,6 @@ void G1BarrierSet::on_thread_detach(Thread* thread) {
}
void G1BarrierSet::print_on(outputStream* st) const {
_card_table->print_on(st, "Card");
_refinement_table->print_on(st, "Refinement");
card_table()->print_on(st, "Card");
refinement_table()->print_on(st, "Refinement");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/atomic.hpp"
class G1CardTable;
class Thread;
@ -66,7 +67,7 @@ class G1BarrierSet: public CardTableBarrierSet {
BufferNode::Allocator _satb_mark_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
G1CardTable* _refinement_table;
Atomic<G1CardTable*> _refinement_table;
public:
G1BarrierSet(G1CardTable* card_table, G1CardTable* refinement_table);
@ -76,7 +77,7 @@ class G1BarrierSet: public CardTableBarrierSet {
return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
}
G1CardTable* refinement_table() const { return _refinement_table; }
G1CardTable* refinement_table() const { return _refinement_table.load_relaxed(); }
// Swap the global card table references, without synchronization.
void swap_global_card_table();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,8 +73,8 @@ inline void G1BarrierSet::write_ref_field_post(T* field) {
// Make sure that the card table reference is read only once. Otherwise the compiler
// might reload that value in the two accesses below, that could cause writes to
// the wrong card table.
CardTable* card_table = AtomicAccess::load(&_card_table);
CardValue* byte = card_table->byte_for(field);
CardTable* local_card_table = card_table();
CardValue* byte = local_card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
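The local copy matters: without it the compiler may legally re-read the atomic field between the two uses and mix addresses from two different card tables. A sketch of the load-once rule (the global and the 512-byte card size are assumptions for illustration):

#include <atomic>
#include <cstdint>

extern std::atomic<char*> g_byte_map;   // assumed global: one byte per 512-byte card

inline void mark_card(const void* field, char clean, char dirty) {
  char* base = g_byte_map.load(std::memory_order_relaxed);   // read exactly once
  char* byte = &base[reinterpret_cast<uintptr_t>(field) >> 9];
  if (*byte == clean) {   // the address computation and this test both use the
    *byte = dirty;        // same table; a reload in between could mix two tables
  }
}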

View File

@ -29,7 +29,6 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = 0;
_coarsen_collision[i] = 0;
_coarsen_from[i].store_relaxed(0);
_coarsen_collision[i].store_relaxed(0);
}
}
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = other._coarsen_from[i];
_coarsen_collision[i] = other._coarsen_collision[i];
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
_coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
_coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
_coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
_coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
_coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
if (collision) {
AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
_coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
}
}
@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ",
_coarsen_from[0], _coarsen_collision[0],
_coarsen_from[1], _coarsen_collision[1],
_coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
_coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
// There is no BitMap at the first level, so index 2 is skipped.
_coarsen_from[3], _coarsen_collision[3],
_coarsen_from[4], _coarsen_collision[4],
_coarsen_from[5], _coarsen_collision[5],
_coarsen_from[6], _coarsen_collision[6]
_coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
_coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
_coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
_coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
);
}
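The statistics cells become Atomic<size_t> bumped with relaxed ordering, which is sufficient because the counts are advisory and nothing orders on them. Roughly, in std::atomic terms:

#include <atomic>
#include <cstddef>

struct CoarsenStats {
  std::atomic<size_t> from[7];
  std::atomic<size_t> collision[7];

  void record(unsigned tag, bool collided) {
    // Plain statistics: relaxed increments suffice, no other data depends on them.
    from[tag].fetch_add(1, std::memory_order_relaxed);
    if (collided) {
      collision[tag].fetch_add(1, std::memory_order_relaxed);
    }
  }
};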
@ -248,7 +247,7 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
// the per region cardsets.
const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table?
bool volatile _inserted_card;
Atomic<bool> _inserted_card;
G1CardSetMemoryManager* _mm;
CardSetHash _table;
@ -311,10 +310,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
if (!_inserted_card && inserted) {
if (!_inserted_card.load_relaxed() && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
AtomicAccess::store(&_inserted_card, true);
_inserted_card.store_relaxed(true);
}
return found.value();
@ -343,9 +342,9 @@ public:
}
void reset() {
if (AtomicAccess::load(&_inserted_card)) {
if (_inserted_card.load_relaxed()) {
_table.unsafe_reset(InitialLogTableSize);
AtomicAccess::store(&_inserted_card, false);
_inserted_card.store_relaxed(false);
}
}
@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value);
}
G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
// Update reference counts under RCU critical section to avoid a
// use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
ContainerPtr container = AtomicAccess::load_acquire(container_addr);
ContainerPtr container = container_addr->load_acquire();
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr;
void coarsen_to_full(ContainerPtr* container_addr) {
void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
while (true) {
ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
ContainerPtr cur_container = container_addr->load_acquire();
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
void operator ()(ContainerPtr* container_addr) {
void operator ()(Atomic<ContainerPtr>* container_addr) {
coarsen_to_full(container_addr);
}
};
@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region);
ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
while (true) {
if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
}
G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
}
@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container;
}
bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region,
bool within_howl) {
@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere();
}
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
_num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
}
}
@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
_num_occupied.add_then_fetch(diff, memory_order_relaxed);
}
}
G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr container,
uint card_region,
uint card_in_region,
@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
_num_occupied.add_then_fetch(1u, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();
@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false;
}
ContainerPtr container = table_entry->_container;
ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that
// case in the switch below.
@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return;
}
ContainerPtr container = table_entry->_container;
ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
st->print("FULL card set)");
return;
@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value =
[&] (G1CardSetHashTableValue* value) {
cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
return true;
};
@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
}
bool G1CardSet::is_empty() const {
return _num_occupied == 0;
return _num_occupied.load_relaxed() == 0;
}
size_t G1CardSet::occupied() const {
return _num_occupied;
return _num_occupied.load_relaxed();
}
size_t G1CardSet::num_containers() {
@ -1051,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() {
_table->reset();
_num_occupied = 0;
_num_occupied.store_relaxed(0);
_mm->flush();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions;
@ -154,8 +155,8 @@ public:
private:
// Indices are "from" indices.
size_t _coarsen_from[NumCoarsenCategories];
size_t _coarsen_collision[NumCoarsenCategories];
Atomic<size_t> _coarsen_from[NumCoarsenCategories];
Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
public:
G1CardSetCoarsenStats() { reset(); }
@ -271,11 +272,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality.
size_t _num_occupied;
Atomic<size_t> _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type);
ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
// Returns true if the card set container should be released
bool release_container(ContainerPtr container);
// Release card set and free if needed.
@ -288,7 +289,7 @@ private:
// coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card).
bool coarsen_container(ContainerPtr volatile* container_addr,
bool coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region, bool within_howl = false);
@ -300,9 +301,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@ -366,7 +367,6 @@ public:
size_t num_containers();
static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes.
@ -412,8 +412,15 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx;
uint volatile _num_occupied;
ContainerPtr volatile _container;
Atomic<uint> _num_occupied;
Atomic<ContainerPtr> _container;
// Copy constructor needed for use in ConcurrentHashTable.
G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
_region_idx(other._region_idx),
_num_occupied(other._num_occupied.load_relaxed()),
_container(other._container.load_relaxed())
{ }
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
};
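The hand-written copy constructor exists because atomics are not copyable, yet ConcurrentHashTable stores these values by value; each field is therefore copied through an explicit relaxed load. The same shape with std::atomic:

#include <atomic>

struct Value {
  const unsigned region;           // immutable key
  std::atomic<unsigned> occupied;  // concurrently updated

  explicit Value(unsigned r) : region(r), occupied(0) {}

  // std::atomic has no copy constructor, so a table that copies values needs
  // this spelled out, one explicit load per atomic field.
  Value(const Value& other)
    : region(other.region),
      occupied(other.occupied.load(std::memory_order_relaxed)) {}
};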

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr;
ContainerPtr volatile * _value_addr;
Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value;
static const uint SizeFieldLen = 3;
@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}
G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}
@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
uintptr_t _ref_count;
Atomic<uintptr_t> _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }
uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
uintptr_t refcount() const { return _ref_count.load_acquire(); }
bool try_increment_refcount();
@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
EntryCountType volatile _num_entries;
Atomic<EntryCountType> _num_entries;
// VLA implementation.
EntryDataType _data[1];
@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj {
EntryCountType volatile* _num_entries_addr;
Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries;
public:
G1CardSetArrayLocker(EntryCountType volatile* value);
G1CardSetArrayLocker(Atomic<EntryCountType>* value);
EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
_num_entries_addr->release_store(_local_num_entries);
}
};
@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);
size_t num_entries() const { return _num_entries & EntryMask; }
size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
static size_t header_size_in_bytes();
@ -223,7 +223,7 @@ public:
};
class G1CardSetBitMap : public G1CardSetContainer {
size_t _num_bits_set;
Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1];
public:
@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}
uint num_bits_set() const { return (uint)_num_bits_set; }
uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
EntryCountType volatile _num_entries;
Atomic<EntryCountType> _num_entries;
private:
// VLA implementation.
ContainerPtr _buckets[1];
Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point.
// Iterates over the given ContainerPtr at the given index in this Howl card set,
@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const;
ContainerPtr const* buckets() const;
Atomic<ContainerPtr> const* buckets() const;
public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
ContainerPtr const* container_addr(EntryCountType index) const;
Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
ContainerPtr* container_addr(EntryCountType index);
Atomic<ContainerPtr>* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
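
For context on the merge() call above: a G1CardSetInlinePtr stores a handful of card indices directly in the bits of the container word, next to the container-type tag and the 3-bit size field (SizeFieldLen). A hedged sketch of that packing follows; the tag width and field order are illustrative assumptions, not the real layout:

#include <cstdint>

using ContainerPtr = uintptr_t;
constexpr unsigned TagBits  = 2;   // assumed width of the container-type tag
constexpr unsigned SizeBits = 3;   // mirrors SizeFieldLen above
constexpr ContainerPtr SizeMask = ((ContainerPtr(1) << SizeBits) - 1) << TagBits;

// Append card_idx as the (num_cards + 1)-th entry and bump the size field.
ContainerPtr merge(ContainerPtr v, uint32_t card_idx,
                   unsigned num_cards, unsigned bits_per_card) {
  unsigned shift = TagBits + SizeBits + num_cards * bits_per_card;
  ContainerPtr with_card = v | (ContainerPtr(card_idx) << shift);
  ContainerPtr new_size  = ContainerPtr(num_cards + 1) << TagBits;
  return (with_card & ~SizeMask) | new_size;
}

The relaxed compare_exchange then publishes the merged word; if another thread won the race, the caller re-reads the container and retries.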
@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) {
return true;
}
@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
return AtomicAccess::sub(&_ref_count, 2u);
return _ref_count.sub_then_fetch(2u);
}
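
The refcount arithmetic above encodes a small convention: the low bit of _ref_count is a permanent tag, so every valid refcounted value is odd, and one reference corresponds to a step of 2; the initial value 3 is the tag bit plus one reference. A compact sketch of that convention, with the invariants read off the asserts (simplified: the real increment retries its CAS in a loop):

#include <atomic>
#include <cstdint>

struct Container {
  std::atomic<uintptr_t> _ref_count{3};   // 0b11: tag bit + one reference

  bool try_increment_refcount() {
    uintptr_t old_value = _ref_count.load(std::memory_order_acquire);
    if ((old_value & 0x1) == 0) {
      return false;                       // not (or no longer) a refcounted value
    }
    // Simplified: the real code retries the CAS on contention.
    return _ref_count.compare_exchange_strong(old_value, old_value + 2);
  }

  uintptr_t decrement_refcount() {
    // Precondition in the real code: the value is odd and >= 3.
    return _ref_count.fetch_sub(2) - 2;   // sub_then_fetch semantics
  }
};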
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
while (true) {
EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
num_entries,
(EntryCountType)(num_entries | LockBitMask));
EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
(EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
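
G1CardSetArrayLocker treats the LockBitMask bit of the entry counter as a spin lock: the constructor installs the bit with a compare-exchange over the unlocked value, and the destructor release-stores the possibly incremented count with the bit cleared. A simplified sketch under those assumptions, with std::atomic standing in and without the SpinYield backoff the real locker uses:

#include <atomic>

using EntryCountType = unsigned;
constexpr EntryCountType LockBitMask = 1u << (sizeof(EntryCountType) * 8 - 1);
constexpr EntryCountType EntryMask   = LockBitMask - 1;

class BitLocker {
  std::atomic<EntryCountType>* _num_entries_addr;
  EntryCountType _local_num_entries;
public:
  explicit BitLocker(std::atomic<EntryCountType>* addr) : _num_entries_addr(addr) {
    EntryCountType cur = addr->load(std::memory_order_relaxed) & EntryMask;
    // Spin until the lock bit is installed over an unlocked value.
    while (!addr->compare_exchange_weak(cur, cur | LockBitMask)) {
      cur &= EntryMask;  // retry against the freshly observed value, unlocked
    }
    _local_num_entries = cur;
  }
  void inc_num_entries() { ++_local_num_entries; }
  ~BitLocker() {
    // Unlock by publishing the new count; the lock bit is cleared implicitly.
    _num_entries_addr->store(_local_num_entries, std::memory_order_release);
  }
};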
@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition");
assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index;
}
@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
if (_num_bits_set >= threshold) {
if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
_num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added;
}
return Found;
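
The bitmap add() above leans on par_set_bit() reporting whether this thread actually flipped the bit, so the relaxed occupancy counter is bumped exactly once per newly set card even under concurrent adds. A minimal sketch of the idiom (illustrative; HotSpot's BitMap is not built on std::atomic):

#include <atomic>
#include <cstddef>

// Returns true iff this call transitioned the bit from 0 to 1.
bool par_set_bit(std::atomic<size_t>& word, unsigned bit) {
  size_t mask = size_t(1) << bit;
  size_t old = word.fetch_or(mask, std::memory_order_relaxed);
  return (old & mask) == 0;
}

void add_card(std::atomic<size_t>& word, unsigned bit,
              std::atomic<size_t>& num_bits_set) {
  if (par_set_bit(word, bit)) {
    num_bits_set.fetch_add(1, std::memory_order_relaxed);  // counted once
  }
}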
@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries, "precondition");
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index;
}
inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
return *container_addr(index);
return (*container_addr(index)).load_relaxed();
}
inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
return reinterpret_cast<ContainerPtr const*>(ptr);
return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
}
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
*container_addr(i) = G1CardSetInlinePtr();
container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
ContainerPtr* array_entry = container_addr(bucket);
ContainerPtr container = AtomicAccess::load_acquire(array_entry);
Atomic<ContainerPtr>* array_entry = container_addr(bucket);
ContainerPtr container = array_entry->load_acquire();
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,

View File

@ -525,7 +525,7 @@ void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func
}
}
HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size, HeapWord* preferred_addr) {
HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
MutexLocker x(Heap_lock);
@ -1320,7 +1320,6 @@ G1CollectedHeap::G1CollectedHeap() :
_card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
_young_regions_cset_group(card_set_config(), &_card_set_freelist_pool, G1CSetCandidateGroup::YoungRegionId),
_cm(nullptr),
_cm_thread(nullptr),
_cr(nullptr),
_task_queues(nullptr),
_partial_array_state_manager(nullptr),
@ -1564,7 +1563,6 @@ jint G1CollectedHeap::initialize() {
// Create the G1ConcurrentMark data structure and thread.
// (Must do this late, so that "max_[reserved_]regions" is defined.)
_cm = new G1ConcurrentMark(this, bitmap_storage);
_cm_thread = _cm->cm_thread();
// Now expand into the initial heap size.
if (!expand(init_byte_size, _workers)) {
@ -1636,7 +1634,9 @@ jint G1CollectedHeap::initialize() {
}
bool G1CollectedHeap::concurrent_mark_is_terminating() const {
return _cm_thread->should_terminate();
assert(_cm != nullptr, "_cm must have been created");
assert(_cm->is_fully_initialized(), "thread must exist in order to check if mark is terminating");
return _cm->cm_thread()->should_terminate();
}
void G1CollectedHeap::stop() {
@ -1645,7 +1645,9 @@ void G1CollectedHeap::stop() {
// that are destroyed during shutdown.
_cr->stop();
_service_thread->stop();
_cm_thread->stop();
if (_cm->is_fully_initialized()) {
_cm->cm_thread()->stop();
}
}
void G1CollectedHeap::safepoint_synchronize_begin() {
@ -1842,7 +1844,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
// is set) so that if a waiter requests another System.gc() it doesn't
// incorrectly see that a marking cycle is still in progress.
if (concurrent) {
_cm_thread->set_idle();
_cm->cm_thread()->set_idle();
}
// Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
@ -2421,7 +2423,6 @@ void G1CollectedHeap::print_gc_on(outputStream* st) const {
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
workers()->threads_do(tc);
tc->do_thread(_cm_thread);
_cm->threads_do(tc);
_cr->threads_do(tc);
tc->do_thread(_service_thread);
@ -2542,15 +2543,15 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
}
void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_mark) {
assert(!_cm_thread->in_progress(), "Can not start concurrent operation while in progress");
assert(_cm->is_fully_initialized(), "sanity");
assert(!_cm->in_progress(), "Can not start concurrent operation while in progress");
MutexLocker x(G1CGC_lock, Mutex::_no_safepoint_check_flag);
if (concurrent_operation_is_full_mark) {
_cm->post_concurrent_mark_start();
_cm_thread->start_full_mark();
_cm->cm_thread()->start_full_mark();
} else {
_cm->post_concurrent_undo_start();
_cm_thread->start_undo_mark();
_cm->cm_thread()->start_undo_mark();
}
G1CGC_lock->notify();
}
@ -2726,6 +2727,8 @@ void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_si
_bytes_used_during_gc = 0;
_cm->fully_initialize();
policy()->decide_on_concurrent_start_pause();
// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already

View File

@ -736,12 +736,10 @@ public:
void iterate_regions_in_range(MemRegion range, const Func& func);
// Commit the required number of G1 region(s) according to the size requested
// and mark them as 'old' region(s). Preferred address is treated as a hint for
// the location of the archive space in the heap. The returned address may or may
// not be same as the preferred address.
// and mark them as 'old' region(s).
// This API is only used for allocating heap space for the archived heap objects
// in the CDS archive.
HeapWord* alloc_archive_region(size_t word_size, HeapWord* preferred_addr);
HeapWord* alloc_archive_region(size_t word_size);
// Populate the G1BlockOffsetTable for archived regions with the given
// memory range.
@ -825,7 +823,6 @@ public:
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
G1ConcurrentMarkThread* _cm_thread;
// The concurrent refiner.
G1ConcurrentRefine* _cr;

View File

@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"

View File

@ -203,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer;
uint volatile _num_regions_added;
Atomic<uint> _num_regions_added;
G1BuildCandidateArray _result;
void update_totals(uint num_regions) {
if (num_regions > 0) {
AtomicAccess::add(&_num_regions_added, num_regions);
_num_regions_added.add_then_fetch(num_regions);
}
}
@ -221,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
uint num_candidates = AtomicAccess::load(&_num_regions_added);
uint num_candidates = _num_regions_added.load_relaxed();
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@ -254,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);
AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
_num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
}
public:
@ -275,7 +275,7 @@ public:
_result.sort_by_gc_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
_num_regions_added);
_num_regions_added.load_relaxed());
}
};

View File

@ -291,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand();
}
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
elem->next = *list;
*list = elem;
void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
elem->next = list->load_relaxed();
list->store_relaxed(elem);
}
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@ -307,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem);
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
TaskQueueEntryChunk* result = *list;
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) {
*list = (*list)->next;
list->store_relaxed(list->load_relaxed()->next);
}
return result;
}
@ -364,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
_chunk_list = nullptr;
_free_list = nullptr;
_chunk_list.store_relaxed(nullptr);
_free_list.store_relaxed(nullptr);
_chunk_allocator.reset();
}
@ -382,12 +382,12 @@ G1CMRootMemRegions::~G1CMRootMemRegions() {
}
void G1CMRootMemRegions::reset() {
_num_root_regions = 0;
_num_root_regions.store_relaxed(0);
}
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u);
size_t idx = _num_root_regions.fetch_then_add(1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space for (max %zu)", _max_regions);
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
@ -398,36 +398,38 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
void G1CMRootMemRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");
_scan_in_progress = _num_root_regions > 0;
_scan_in_progress.store_relaxed(num_root_regions() > 0);
_claimed_root_regions = 0;
_should_abort = false;
_claimed_root_regions.store_relaxed(0);
_should_abort.store_relaxed(false);
}
const MemRegion* G1CMRootMemRegions::claim_next() {
if (_should_abort) {
if (_should_abort.load_relaxed()) {
// If someone has set the should_abort flag, we return null to
// force the caller to bail out of their loop.
return nullptr;
}
if (_claimed_root_regions >= _num_root_regions) {
uint local_num_root_regions = num_root_regions();
if (_claimed_root_regions.load_relaxed() >= local_num_root_regions) {
return nullptr;
}
size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u);
if (claimed_index < _num_root_regions) {
size_t claimed_index = _claimed_root_regions.fetch_then_add(1u);
if (claimed_index < local_num_root_regions) {
return &_root_regions[claimed_index];
}
return nullptr;
}
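
claim_next() is a claim-by-counter loop: a cheap relaxed pre-check, an unconditional fetch-then-add, and a re-check of the returned index against the published count, since several threads may pass the pre-check at once. A self-contained sketch of the same shape with illustrative names:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct RootRegionClaimer {
  std::atomic<bool>   _should_abort{false};
  std::atomic<size_t> _claimed{0};
  size_t _num_regions = 0;                  // fixed before scanning starts

  // Returns the claimed index, or SIZE_MAX when aborted or exhausted.
  size_t claim_next() {
    if (_should_abort.load(std::memory_order_relaxed)) return SIZE_MAX;
    if (_claimed.load(std::memory_order_relaxed) >= _num_regions) return SIZE_MAX;
    size_t idx = _claimed.fetch_add(1, std::memory_order_relaxed);
    return idx < _num_regions ? idx : SIZE_MAX;  // losers of the race bail out
  }
};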
uint G1CMRootMemRegions::num_root_regions() const {
return (uint)_num_root_regions;
return (uint)_num_root_regions.load_relaxed();
}
bool G1CMRootMemRegions::contains(const MemRegion mr) const {
for (uint i = 0; i < _num_root_regions; i++) {
uint local_num_root_regions = num_root_regions();
for (uint i = 0; i < local_num_root_regions; i++) {
if (_root_regions[i].equals(mr)) {
return true;
}
@ -437,7 +439,7 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const {
void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress = false;
_scan_in_progress.store_relaxed(false);
G1RootRegionScan_lock->notify_all();
}
@ -448,10 +450,10 @@ void G1CMRootMemRegions::cancel_scan() {
void G1CMRootMemRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");
if (!_should_abort) {
assert(_claimed_root_regions >= num_root_regions(),
if (!_should_abort.load_relaxed()) {
assert(_claimed_root_regions.load_relaxed() >= num_root_regions(),
"we should have claimed all root regions, claimed %zu, length = %u",
_claimed_root_regions, num_root_regions());
_claimed_root_regions.load_relaxed(), num_root_regions());
}
notify_scan_done();
@ -473,7 +475,7 @@ bool G1CMRootMemRegions::wait_until_scan_finished() {
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* bitmap_storage) :
// _cm_thread set inside the constructor
_cm_thread(nullptr),
_g1h(g1h),
_mark_bitmap(),
@ -484,13 +486,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_global_mark_stack(),
// _finger set in set_non_marking_state
_finger(nullptr), // _finger set in set_non_marking_state
_worker_id_offset(G1ConcRefinementThreads), // The refinement control thread does not refine cards, so it's just the worker threads.
_max_num_tasks(MAX2(ConcGCThreads, ParallelGCThreads)),
// _num_active_tasks set in set_non_marking_state()
// _tasks set inside the constructor
_num_active_tasks(0), // _num_active_tasks set in set_non_marking_state()
_tasks(nullptr), // _tasks set inside late_init()
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
_terminator(_max_num_tasks, _task_queues),
_partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
@ -525,6 +526,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
_mark_bitmap.initialize(g1h->reserved(), bitmap_storage);
}
void G1ConcurrentMark::fully_initialize() {
if (is_fully_initialized()) {
return;
}
// Create & start ConcurrentMark thread.
_cm_thread = new G1ConcurrentMarkThread(this);
@ -560,6 +567,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
reset_at_marking_complete();
}
bool G1ConcurrentMark::in_progress() const {
return is_fully_initialized() ? _cm_thread->in_progress() : false;
}
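
The new fully_initialize() / is_fully_initialized() pair makes creation of the concurrent mark thread lazy and idempotent; every path that used to dereference _cm_thread directly now goes through the guard, as in in_progress() above. A stripped-down sketch of the pattern (single-threaded view, since the real call site runs at a safepoint):

// Illustrative stand-ins; not the real G1 types.
class Marker {
public:
  bool in_progress() const { return false; }
};

class Owner {
  Marker* _thread = nullptr;
public:
  bool is_fully_initialized() const { return _thread != nullptr; }
  void fully_initialize() {
    if (is_fully_initialized()) return;     // idempotent, as above
    _thread = new Marker();
  }
  bool in_progress() const {
    return is_fully_initialized() ? _thread->in_progress() : false;
  }
};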
PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
return _partial_array_state_manager;
}
@ -765,7 +776,7 @@ private:
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
assert(!suspendible() || _cm->in_progress(), "invariant");
assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
// Abort iteration if necessary.
@ -821,7 +832,8 @@ void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) {
void G1ConcurrentMark::cleanup_for_next_mark() {
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(is_fully_initialized(), "should be initialized");
guarantee(in_progress(), "invariant");
// We are finishing up the current cycle by clearing the next
// marking bitmap and getting it ready for the next cycle. During
@ -834,7 +846,8 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
reset_partial_array_state_manager();
// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(is_fully_initialized(), "should be initialized");
guarantee(in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
@ -1925,7 +1938,8 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
// nothing, but this situation should be extremely rare (a full gc after shutdown
// has been signalled is already rare), and this work should be negligible compared
// to actual full gc work.
if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) {
if (!is_fully_initialized() || (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating())) {
return false;
}
@ -1987,6 +2001,10 @@ void G1ConcurrentMark::print_summary_info() {
}
log.trace(" Concurrent marking:");
if (!is_fully_initialized()) {
log.trace(" has not been initialized yet");
return;
}
print_ms_time_info(" ", "remarks", _remark_times);
{
print_ms_time_info(" ", "final marks", _remark_mark_times);
@ -2003,7 +2021,10 @@ void G1ConcurrentMark::print_summary_info() {
}
void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
_concurrent_workers->threads_do(tc);
if (is_fully_initialized()) { // they are initialized late
tc->do_thread(_cm_thread);
_concurrent_workers->threads_do(tc);
}
}
void G1ConcurrentMark::print_on(outputStream* st) const {

View File

@ -210,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator;
char _pad0[DEFAULT_PADDING_SIZE];
TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// Atomically add the given chunk to the list.
void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
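
The _pad0/_pad1/_pad2 members above keep the two list heads on separate cache lines so that threads pushing to _free_list and popping from _chunk_list do not false-share. A generic sketch of the layout trick, with DEFAULT_PADDING_SIZE assumed to be the cache-line size:

#include <atomic>
#include <cstddef>

constexpr size_t DEFAULT_PADDING_SIZE = 64;  // assumed cache-line size

struct Chunk;                                // opaque for this sketch

struct PaddedLists {
  char _pad0[DEFAULT_PADDING_SIZE];
  std::atomic<Chunk*> _free_list{nullptr};
  char _pad1[DEFAULT_PADDING_SIZE - sizeof(Chunk*)];
  std::atomic<Chunk*> _chunk_list{nullptr};
  size_t _chunks_in_chunk_list = 0;
  char _pad2[DEFAULT_PADDING_SIZE - sizeof(Chunk*) - sizeof(size_t)];
};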
@ -252,7 +252,7 @@ private:
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == nullptr; }
bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
size_t capacity() const { return _chunk_allocator.capacity(); }
@ -290,12 +290,12 @@ class G1CMRootMemRegions {
MemRegion* _root_regions;
size_t const _max_regions;
volatile size_t _num_root_regions; // Actual number of root regions.
Atomic<size_t> _num_root_regions; // Actual number of root regions.
volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
Atomic<size_t> _claimed_root_regions; // Number of root regions currently claimed.
volatile bool _scan_in_progress;
volatile bool _should_abort;
Atomic<bool> _scan_in_progress;
Atomic<bool> _should_abort;
void notify_scan_done();
@ -312,11 +312,11 @@ public:
void prepare_for_scan();
// Forces get_next() to return null so that the iteration aborts early.
void abort() { _should_abort = true; }
void abort() { _should_abort.store_relaxed(true); }
// Return true if the CM thread is actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }
bool scan_in_progress() { return _scan_in_progress.load_relaxed(); }
// Claim the next root MemRegion to scan atomically, or return null if
// all have been claimed.
@ -555,6 +555,9 @@ public:
uint worker_id_offset() const { return _worker_id_offset; }
void fully_initialize();
bool is_fully_initialized() const { return _cm_thread != nullptr; }
bool in_progress() const;
uint max_num_tasks() const { return _max_num_tasks; }
// Clear statistics gathered during the concurrent cycle for the given region after

View File

@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
size_t num_chunks = 0;
TaskQueueEntryChunk* cur = _chunk_list;
TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);

View File

@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,7 @@
*
*/
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/timer.hpp"
@ -39,19 +39,27 @@ G1ConcurrentRefineStats::G1ConcurrentRefineStats() :
{}
void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
AtomicAccess::add(&_sweep_duration, other->_sweep_duration, memory_order_relaxed);
AtomicAccess::add(&_yield_during_sweep_duration, other->_yield_during_sweep_duration, memory_order_relaxed);
_sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
_yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);
AtomicAccess::add(&_cards_scanned, other->_cards_scanned, memory_order_relaxed);
AtomicAccess::add(&_cards_clean, other->_cards_clean, memory_order_relaxed);
AtomicAccess::add(&_cards_not_parsable, other->_cards_not_parsable, memory_order_relaxed);
AtomicAccess::add(&_cards_already_refer_to_cset, other->_cards_already_refer_to_cset, memory_order_relaxed);
AtomicAccess::add(&_cards_refer_to_cset, other->_cards_refer_to_cset, memory_order_relaxed);
AtomicAccess::add(&_cards_no_cross_region, other->_cards_no_cross_region, memory_order_relaxed);
_cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
_cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
_cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
_cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
_cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
_cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);
AtomicAccess::add(&_refine_duration, other->_refine_duration, memory_order_relaxed);
_refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
}
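
add_atomic() folds a worker's private statistics into a shared summary with relaxed atomic adds; only the totals matter, so no ordering between the counters is required. A small sketch of the accumulation pattern with illustrative fields:

#include <atomic>
#include <cstddef>

struct RefineStats {
  std::atomic<size_t> cards_scanned{0};
  std::atomic<size_t> cards_clean{0};

  // Merge another thread's totals; relaxed is enough since the summary
  // is only read after the workers have finished.
  void add_atomic(const RefineStats& other) {
    cards_scanned.fetch_add(other.cards_scanned.load(std::memory_order_relaxed),
                            std::memory_order_relaxed);
    cards_clean.fetch_add(other.cards_clean.load(std::memory_order_relaxed),
                          std::memory_order_relaxed);
  }
};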
void G1ConcurrentRefineStats::reset() {
*this = G1ConcurrentRefineStats();
_sweep_duration.store_relaxed(0);
_yield_during_sweep_duration.store_relaxed(0);
_cards_scanned.store_relaxed(0);
_cards_clean.store_relaxed(0);
_cards_not_parsable.store_relaxed(0);
_cards_already_refer_to_cset.store_relaxed(0);
_cards_refer_to_cset.store_relaxed(0);
_cards_no_cross_region.store_relaxed(0);
_refine_duration.store_relaxed(0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,61 +26,61 @@
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
// Collection of statistics for concurrent refinement processing.
// Used for collecting per-thread statistics and for summaries over a
// collection of threads.
class G1ConcurrentRefineStats : public CHeapObj<mtGC> {
jlong _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them.
jlong _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
Atomic<jlong> _sweep_duration; // Time spent sweeping the table finding non-clean cards
// and refining them.
Atomic<jlong> _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
size_t _cards_scanned; // Total number of cards scanned.
size_t _cards_clean; // Number of cards found clean.
size_t _cards_not_parsable; // Number of cards we could not parse and left unrefined.
size_t _cards_already_refer_to_cset;// Number of cards found to already refer to the collection set.
size_t _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
size_t _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
Atomic<size_t> _cards_scanned; // Total number of cards scanned.
Atomic<size_t> _cards_clean; // Number of cards found clean.
Atomic<size_t> _cards_not_parsable; // Number of cards we could not parse and left unrefined.
Atomic<size_t> _cards_already_refer_to_cset;// Number of cards found to already refer to the collection set.
Atomic<size_t> _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
Atomic<size_t> _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
jlong _refine_duration; // Time spent during actual refinement.
Atomic<jlong> _refine_duration; // Time spent during actual refinement.
public:
G1ConcurrentRefineStats();
// Time spent performing sweeping the refinement table (includes actual refinement,
// but not yield time).
jlong sweep_duration() const { return _sweep_duration - _yield_during_sweep_duration; }
jlong yield_during_sweep_duration() const { return _yield_during_sweep_duration; }
jlong refine_duration() const { return _refine_duration; }
inline jlong sweep_duration() const;
inline jlong yield_during_sweep_duration() const;
inline jlong refine_duration() const;
// Number of refined cards.
size_t refined_cards() const { return cards_not_clean(); }
inline size_t refined_cards() const;
size_t cards_scanned() const { return _cards_scanned; }
size_t cards_clean() const { return _cards_clean; }
size_t cards_not_clean() const { return _cards_scanned - _cards_clean; }
size_t cards_not_parsable() const { return _cards_not_parsable; }
size_t cards_already_refer_to_cset() const { return _cards_already_refer_to_cset; }
size_t cards_refer_to_cset() const { return _cards_refer_to_cset; }
size_t cards_no_cross_region() const { return _cards_no_cross_region; }
inline size_t cards_scanned() const;
inline size_t cards_clean() const;
inline size_t cards_not_clean() const;
inline size_t cards_not_parsable() const;
inline size_t cards_already_refer_to_cset() const;
inline size_t cards_refer_to_cset() const;
inline size_t cards_no_cross_region() const;
// Number of cards that were marked dirty and in need of refinement. This includes cards recently
// found to refer to the collection set as they originally were dirty.
size_t cards_pending() const { return cards_not_clean() - _cards_already_refer_to_cset; }
inline size_t cards_pending() const;
size_t cards_to_cset() const { return _cards_already_refer_to_cset + _cards_refer_to_cset; }
inline size_t cards_to_cset() const;
void inc_sweep_time(jlong t) { _sweep_duration += t; }
void inc_yield_during_sweep_duration(jlong t) { _yield_during_sweep_duration += t; }
void inc_refine_duration(jlong t) { _refine_duration += t; }
inline void inc_sweep_time(jlong t);
inline void inc_yield_during_sweep_duration(jlong t);
inline void inc_refine_duration(jlong t);
void inc_cards_scanned(size_t increment) { _cards_scanned += increment; }
void inc_cards_clean(size_t increment) { _cards_clean += increment; }
void inc_cards_not_parsable() { _cards_not_parsable++; }
void inc_cards_already_refer_to_cset() { _cards_already_refer_to_cset++; }
void inc_cards_refer_to_cset() { _cards_refer_to_cset++; }
void inc_cards_no_cross_region() { _cards_no_cross_region++; }
inline void inc_cards_scanned(size_t increment);
inline void inc_cards_clean(size_t increment);
inline void inc_cards_not_parsable();
inline void inc_cards_already_refer_to_cset();
inline void inc_cards_refer_to_cset();
inline void inc_cards_no_cross_region();
void add_atomic(G1ConcurrentRefineStats* other);

Some files were not shown because too many files have changed in this diff