[0-9a-z_.]*)( .*)?$");
/**
* Returns the set of locations in which the tag may be used.
@@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
+ // e.g. "preview-feature-4.6" splits into "preview-feature-", "4", ".6"
+ String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
- String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
- rootParent, idPrefix, chapter, section);
+ String url = preview == null ?
+ String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section) :
+ String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section, preview);
+
+ var literal = expand(contents).trim();
+ var prefix = (preview == null ? "" : preview) + chapter + section;
+ if (literal.startsWith(prefix)) {
+ var hasFullTitle = literal.length() > prefix.length();
+ if (hasFullTitle) {
+ // Drop the preview identifier
+ literal = chapter + section + literal.substring(prefix.length());
+ } else {
+ // No section sign if the tag refers to a chapter, like {@jvms 4}
+ String sectionSign = section.isEmpty() ? "" : "§";
+ // Change whole text to "§chapter.x" in inline tags.
+ literal = sectionSign + chapter + section;
+ }
+ }
sb.append("")
- .append(expand(contents))
+ .append(literal)
.append("");
+ if (preview != null) {
+ // Add PREVIEW superscript that links to JLS/JVMS 1.5.1
+ // "Restrictions on the Use of Preview Features"
+ // Similar to how APIs link to the Preview info box warning
+ var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, "1", ".5.1");
+ sb.append("PREVIEW");
+ }
+
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("
");
}
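
For context, the hunk above splits a spec tag such as "preview-feature-4.6" into preview/chapter/section groups and builds one of two URL shapes. Below is a minimal standalone Java sketch of that flow; the TAG_PATTERN regex here is an assumption (the real pattern is only partially visible above), and only the two String.format shapes are taken from the diff.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JSpecTagSketch {
    // Hypothetical pattern; the real TAG_PATTERN is truncated above. It only needs
    // to expose the named groups "preview", "chapter" and "section" used in the hunk.
    private static final Pattern TAG_PATTERN =
            Pattern.compile("^(?<preview>[a-z-]+-)?(?<chapter>\\d+)(?<section>(\\.\\d+)*)");

    public static void main(String[] args) {
        String tagText = "preview-feature-4.6 Some Section Title";
        Matcher m = TAG_PATTERN.matcher(tagText);
        if (m.find()) {
            String preview = m.group("preview");   // "preview-feature-" or null
            String chapter = m.group("chapter");   // "4"
            String section = m.group("section");   // ".6"
            String idPrefix = "jvms";              // example value
            String rootParent = "../..";           // example value
            // Mirrors the two URL shapes in the hunk: regular spec page vs. preview spec page.
            String url = preview == null
                    ? String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
                                    rootParent, idPrefix, chapter, section)
                    : String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
                                    rootParent, idPrefix, chapter, section, preview);
            System.out.println(url);
        }
    }
}
```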
diff --git a/make/modules/java.desktop/lib/AwtLibraries.gmk b/make/modules/java.desktop/lib/AwtLibraries.gmk
index 463e09e12dc..8b6b50b9e62 100644
--- a/make/modules/java.desktop/lib/AwtLibraries.gmk
+++ b/make/modules/java.desktop/lib/AwtLibraries.gmk
@@ -88,6 +88,10 @@ LIBAWT_EXTRA_HEADER_DIRS := \
LIBAWT_CFLAGS := -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES -DMLIB_NO_LIBSUNMATH
+ifeq ($(ENABLE_HEADLESS_ONLY), true)
+ LIBAWT_CFLAGS += -DHEADLESS
+endif
+
ifeq ($(call isTargetOs, windows), true)
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE -DMLIB_OS64BIT
LIBAWT_RCFLAGS ?= -I$(TOPDIR)/src/java.base/windows/native/launcher/icons
@@ -167,11 +171,18 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
+ LIBAWT_HEADLESS_EXCLUDE_FILES := \
+ GLXGraphicsConfig.c \
+ GLXSurfaceData.c \
+ X11PMBlitLoops.c \
+ X11Renderer.c \
+ X11SurfaceData.c \
+ #
+
LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
$(LIBAWT_DEFAULT_HEADER_DIRS) \
common/awt/debug \
common/font \
- common/java2d/opengl \
java.base:libjvm \
#
@@ -191,7 +202,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
- EXCLUDES := medialib, \
+ EXCLUDES := medialib opengl, \
+ EXCLUDE_FILES := $(LIBAWT_HEADLESS_EXCLUDE_FILES), \
ONLY_EXPORTED := $(LIBAWT_HEADLESS_ONLY_EXPORTED), \
OPTIMIZATION := LOW, \
CFLAGS := -DHEADLESS=true $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) \
diff --git a/make/modules/jdk.security.auth/Lib.gmk b/make/modules/jdk.security.auth/Lib.gmk
index 9ead32dbe12..96d609f08d6 100644
--- a/make/modules/jdk.security.auth/Lib.gmk
+++ b/make/modules/jdk.security.auth/Lib.gmk
@@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################
-$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
- NAME := jaas, \
- OPTIMIZATION := LOW, \
- EXTRA_HEADER_DIRS := java.base:libjava, \
- LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
-))
-
-TARGETS += $(BUILD_LIBJAAS)
+ifeq ($(call isTargetOs, windows), true)
+ $(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
+ NAME := jaas, \
+ OPTIMIZATION := LOW, \
+ EXTRA_HEADER_DIRS := java.base:libjava, \
+ LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
+ ))
+ TARGETS += $(BUILD_LIBJAAS)
+endif
################################################################################
diff --git a/src/demo/share/jfc/TableExample/OldJTable.java b/src/demo/share/jfc/TableExample/OldJTable.java
deleted file mode 100644
index 8c77978fe8a..00000000000
--- a/src/demo/share/jfc/TableExample/OldJTable.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * - Neither the name of Oracle nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * This source code is provided to illustrate the usage of a given feature
- * or technique and has been deliberately simplified. Additional steps
- * required for a production-quality application, such as security checks,
- * input validation and proper error handling, might not be present in
- * this sample code.
- */
-
-
-
-import java.util.EventObject;
-import java.util.List;
-import javax.swing.JTable;
-import javax.swing.table.DefaultTableModel;
-import javax.swing.table.TableCellEditor;
-import javax.swing.table.TableCellRenderer;
-import javax.swing.table.TableColumn;
-
-
-/**
- * The OldJTable is an unsupported class containing some methods that were
- * deleted from the JTable between releases 0.6 and 0.7
- */
-@SuppressWarnings("serial")
-public class OldJTable extends JTable
-{
- /*
- * A new convenience method returning the index of the column in the
- * co-ordinate space of the view.
- */
- public int getColumnIndex(Object identifier) {
- return getColumnModel().getColumnIndex(identifier);
- }
-
-//
-// Methods deleted from the JTable because they only work with the
-// DefaultTableModel.
-//
-
- public TableColumn addColumn(Object columnIdentifier, int width) {
- return addColumn(columnIdentifier, width, null, null, null);
- }
-
- public TableColumn addColumn(Object columnIdentifier, List<?> columnData) {
- return addColumn(columnIdentifier, -1, null, null, columnData);
- }
-
- // Override the new JTable implementation - it will not add a column to the
- // DefaultTableModel.
- public TableColumn addColumn(Object columnIdentifier, int width,
- TableCellRenderer renderer,
- TableCellEditor editor) {
- return addColumn(columnIdentifier, width, renderer, editor, null);
- }
-
- public TableColumn addColumn(Object columnIdentifier, int width,
- TableCellRenderer renderer,
- TableCellEditor editor, List<?> columnData) {
- checkDefaultTableModel();
-
- // Set up the model side first
- DefaultTableModel m = (DefaultTableModel)getModel();
- m.addColumn(columnIdentifier, columnData.toArray());
-
- // The column will have been added to the end, so the index of the
- // column in the model is the last element.
- TableColumn newColumn = new TableColumn(
- m.getColumnCount()-1, width, renderer, editor);
- super.addColumn(newColumn);
- return newColumn;
- }
-
- // Not possible to make this work the same way ... change it so that
- // it does not delete columns from the model.
- public void removeColumn(Object columnIdentifier) {
- super.removeColumn(getColumn(columnIdentifier));
- }
-
- public void addRow(Object[] rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).addRow(rowData);
- }
-
- public void addRow(List<?> rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).addRow(rowData.toArray());
- }
-
- public void removeRow(int rowIndex) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).removeRow(rowIndex);
- }
-
- public void moveRow(int startIndex, int endIndex, int toIndex) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).moveRow(startIndex, endIndex, toIndex);
- }
-
- public void insertRow(int rowIndex, Object[] rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).insertRow(rowIndex, rowData);
- }
-
- public void insertRow(int rowIndex, List<?> rowData) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).insertRow(rowIndex, rowData.toArray());
- }
-
- public void setNumRows(int newSize) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setNumRows(newSize);
- }
-
- public void setDataVector(Object[][] newData, List<?> columnIds) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setDataVector(
- newData, columnIds.toArray());
- }
-
- public void setDataVector(Object[][] newData, Object[] columnIds) {
- checkDefaultTableModel();
- ((DefaultTableModel)getModel()).setDataVector(newData, columnIds);
- }
-
- protected void checkDefaultTableModel() {
- if(!(dataModel instanceof DefaultTableModel))
- throw new InternalError("In order to use this method, the data model must be an instance of DefaultTableModel.");
- }
-
-//
-// Methods removed from JTable in the move from identifiers to ints.
-//
-
- public Object getValueAt(Object columnIdentifier, int rowIndex) {
- return super.getValueAt(rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public boolean isCellEditable(Object columnIdentifier, int rowIndex) {
- return super.isCellEditable(rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public void setValueAt(Object aValue, Object columnIdentifier, int rowIndex) {
- super.setValueAt(aValue, rowIndex, getColumnIndex(columnIdentifier));
- }
-
- public boolean editColumnRow(Object identifier, int row) {
- return super.editCellAt(row, getColumnIndex(identifier));
- }
-
- public void moveColumn(Object columnIdentifier, Object targetColumnIdentifier) {
- moveColumn(getColumnIndex(columnIdentifier),
- getColumnIndex(targetColumnIdentifier));
- }
-
- public boolean isColumnSelected(Object identifier) {
- return isColumnSelected(getColumnIndex(identifier));
- }
-
- public TableColumn addColumn(int modelColumn, int width) {
- return addColumn(modelColumn, width, null, null);
- }
-
- public TableColumn addColumn(int modelColumn) {
- return addColumn(modelColumn, 75, null, null);
- }
-
- /**
- * Creates a new column with modelColumn, width,
- * renderer, and editor and adds it to the end of
- * the JTable's array of columns. This method also retrieves the
- * name of the column using the model's getColumnName(modelColumn)
- * method, and sets the both the header value and the identifier
- * for this TableColumn accordingly.
- *
- * The modelColumn is the index of the column in the model which
- * will supply the data for this column in the table. This, like the
- * columnIdentifier in previous releases, does not change as the
- * columns are moved in the view.
- *
- * For the rest of the JTable API, and all of its associated classes,
- * columns are referred to in the co-ordinate system of the view, the
- * index of the column in the model is kept inside the TableColumn
- * and is used only to retrieve the information from the appropriate
- * column in the model.
- *
- *
- * @param modelColumn The index of the column in the model
- * @param width The new column's width. Or -1 to use
- * the default width
- * @param renderer The renderer used with the new column.
- * Or null to use the default renderer.
- * @param editor The editor used with the new column.
- * Or null to use the default editor.
- */
- public TableColumn addColumn(int modelColumn, int width,
- TableCellRenderer renderer,
- TableCellEditor editor) {
- TableColumn newColumn = new TableColumn(
- modelColumn, width, renderer, editor);
- addColumn(newColumn);
- return newColumn;
- }
-
-//
-// Methods that had their arguments switched.
-//
-
-// These won't work with the new table package.
-
-/*
- public Object getValueAt(int columnIndex, int rowIndex) {
- return super.getValueAt(rowIndex, columnIndex);
- }
-
- public boolean isCellEditable(int columnIndex, int rowIndex) {
- return super.isCellEditable(rowIndex, columnIndex);
- }
-
- public void setValueAt(Object aValue, int columnIndex, int rowIndex) {
- super.setValueAt(aValue, rowIndex, columnIndex);
- }
-*/
-
- public boolean editColumnRow(int columnIndex, int rowIndex) {
- return super.editCellAt(rowIndex, columnIndex);
- }
-
- public boolean editColumnRow(int columnIndex, int rowIndex, EventObject e){
- return super.editCellAt(rowIndex, columnIndex, e);
- }
-
-
-} // End Of Class OldJTable
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index b9252cc56ff..a9ca91d9309 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1229,7 +1229,7 @@ public:
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
- // Convert BootTest condition to Assembler condition.
+ // Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
%}
@@ -2579,7 +2579,7 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
return true;
}
-// Convert BootTest condition to Assembler condition.
+// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 37a6a130e0d..c0621cbd5c2 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1218,43 +1218,11 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
-void LIR_Assembler::type_profile_helper(Register mdo,
- ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done) {
+void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv) {
- // Given a profile data offset, generate an Address which points to
- // the corresponding slot in mdo->data().
- // Clobbers rscratch2.
- auto slot_at = [=](ByteSize offset) -> Address {
- return __ form_address(rscratch2, mdo,
- md->byte_offset_of_slot(data, offset),
- LogBytesPerWord);
- };
-
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- // See if the receiver is receiver[n].
- __ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
- __ cmp(recv, rscratch1);
- __ br(Assembler::NE, next_test);
- __ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
- DataLayout::counter_increment);
- __ b(*update_done);
- __ bind(next_test);
- }
-
- // Didn't find receiver; find next empty slot and fill it in
- for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
- Label next_test;
- Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
- __ ldr(rscratch1, recv_addr);
- __ cbnz(rscratch1, next_test);
- __ str(recv, recv_addr);
- __ mov(rscratch1, DataLayout::counter_increment);
- __ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
- __ b(*update_done);
- __ bind(next_test);
- }
+ int mdp_offset = md->byte_offset_of_slot(data, in_ByteSize(0));
+ __ profile_receiver_type(recv, mdo, mdp_offset);
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
@@ -1316,14 +1284,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ b(*obj_is_null);
__ bind(not_null);
- Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, obj);
- type_profile_helper(mdo, md, data, recv, &update_done);
- Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
- __ addptr(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
+ type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(obj, *obj_is_null);
}
@@ -1430,13 +1393,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ b(done);
__ bind(not_null);
- Label update_done;
Register recv = k_RInfo;
__ load_klass(recv, value);
- type_profile_helper(mdo, md, data, recv, &update_done);
- Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
- __ addptr(counter_addr, DataLayout::counter_increment);
- __ bind(update_done);
+ type_profile_helper(mdo, md, data, recv);
} else {
__ cbz(value, done);
}
@@ -2540,13 +2499,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
- // dynamic tests on the receiver type
-
- // NOTE: we should probably put a lock around this search to
- // avoid collisions by concurrent compilations
+ // dynamic tests on the receiver type.
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
- uint i;
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
+ for (uint i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
@@ -2554,36 +2509,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
return;
}
}
-
- // Receiver type not found in profile data; select an empty slot
-
- // Note that this is less efficient than it should be because it
- // always does a write to the receiver part of the
- // VirtualCallData rather than just the first time
- for (i = 0; i < VirtualCallData::row_limit(); i++) {
- ciKlass* receiver = vc_data->receiver(i);
- if (receiver == nullptr) {
- __ mov_metadata(rscratch1, known_klass->constant_encoding());
- Address recv_addr =
- __ form_address(rscratch2, mdo,
- md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
- LogBytesPerWord);
- __ str(rscratch1, recv_addr);
- Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
- __ addptr(data_addr, DataLayout::counter_increment);
- return;
- }
- }
+ // Receiver type is not found in profile data.
+ // Fall back to runtime helper to handle the rest at runtime.
+ __ mov_metadata(recv, known_klass->constant_encoding());
} else {
__ load_klass(recv, recv);
- Label update_done;
- type_profile_helper(mdo, md, data, recv, &update_done);
- // Receiver did not match any saved receiver and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- __ addptr(counter_addr, DataLayout::counter_increment);
-
- __ bind(update_done);
}
+ type_profile_helper(mdo, md, data, recv);
} else {
// Static call
__ addptr(counter_addr, DataLayout::counter_increment);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 21916a5f7dd..5af06fc6a1c 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -50,9 +50,8 @@ friend class ArrayCopyStub;
Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
// Record the type of the receiver in ReceiverTypeData
- void type_profile_helper(Register mdo,
- ciMethodData *md, ciProfileData *data,
- Register recv, Label* update_done);
+ void type_profile_helper(Register mdo, ciMethodData *md,
+ ciProfileData *data, Register recv);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index 9a035d9f40e..ad7bac4e067 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -85,26 +85,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
- }
-}
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -358,20 +348,20 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers();
- satb_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- rthread /* thread */,
- tmp1 /* tmp1 */,
- tmp2 /* tmp2 */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ rthread /* thread */,
+ tmp1 /* tmp1 */,
+ tmp2 /* tmp2 */,
+ true /* tosca_live */,
+ true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ lsr(obj, obj, CardTable::card_shift());
@@ -394,13 +384,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- if (!on_oop) {
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
- // flatten object address if needed
+ // Flatten object address right away for simplicity: likely needed by barriers
if (dst.index() == noreg && dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mov(tmp3, dst.base());
@@ -409,20 +399,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ lea(tmp3, dst);
}
- shenandoah_write_barrier_pre(masm,
- tmp3 /* obj */,
- tmp2 /* pre_val */,
- rthread /* thread */,
- tmp1 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
+ bool storing_non_null = (val != noreg);
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp3 /* obj */,
+ tmp2 /* pre_val */,
+ rthread /* thread */,
+ tmp1 /* tmp */,
+ rscratch1 /* tmp2 */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
- if (needs_post_barrier) {
- store_check(masm, tmp3);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp3);
}
}
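
The rewritten store_at above makes the reference-store barrier ordering explicit: SATB pre-barrier on the previous value, then the store itself, then the card post-barrier on the store address when a non-null reference was written. A hedged Java model of that ordering follows; the predicates and helpers (enqueueForSATB, dirtyCardFor) are placeholders, not Shenandoah APIs.

```java
// Simplified model of the ordering the rewritten store_at enforces.
final class StoreBarrierSketch {
    static final boolean SATB_BARRIER_ENABLED = true;
    static final boolean CARD_BARRIER_ENABLED = true;

    static void referenceStore(Object[] heapSlotHolder, int slot, Object newValue) {
        // 1: pre-barrier -- SATB needs the previous value before it is overwritten.
        if (SATB_BARRIER_ENABLED) {
            Object previous = heapSlotHolder[slot];
            enqueueForSATB(previous);
        }
        // 2: the store itself.
        heapSlotHolder[slot] = newValue;
        // 3: post-barrier -- the card barrier only needs the store address,
        //    and only when a non-null reference was written.
        if (CARD_BARRIER_ENABLED && newValue != null) {
            dirtyCardFor(heapSlotHolder, slot);
        }
    }

    static void enqueueForSATB(Object previous) { /* model only */ }
    static void dirtyCardFor(Object[] holder, int slot) { /* model only */ }
}
```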
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
index c0e708e1292..362fcae1ccd 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
@@ -40,23 +40,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call);
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 957c2aee1c1..2b506b241e0 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -240,15 +240,14 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
// Rsub_klass: subklass
//
// Kills:
-// r2, r5
+// r2
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Label& ok_is_subtype) {
assert(Rsub_klass != r0, "r0 holds superklass");
assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
- assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
// Profile the not-null value's klass.
- profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
+ profile_typecheck(r2, Rsub_klass); // blows r2
// Do the check.
check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
@@ -991,7 +990,6 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
- Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1009,7 +1007,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
// Record the receiver type.
- record_klass_in_profile(receiver, mdp, reg2);
+ profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@@ -1018,131 +1016,6 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
}
}
-// This routine creates a state machine for updating the multi-row
-// type profile at a virtual call site (or other type-sensitive bytecode).
-// The machine visits each row (of receiver/count) until the receiver type
-// is found, or until it runs out of rows. At the same time, it remembers
-// the location of the first empty row. (An empty row records null for its
-// receiver, and can be allocated for a newly-observed receiver type.)
-// Because there are two degrees of freedom in the state, a simple linear
-// search will not work; it must be a decision tree. Hence this helper
-// function is recursive, to generate the required tree structured code.
-// It's the interpreter, so we are trading off code space for speed.
-// See below for example code.
-void InterpreterMacroAssembler::record_klass_in_profile_helper(
- Register receiver, Register mdp,
- Register reg2, int start_row,
- Label& done) {
- if (TypeProfileWidth == 0) {
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- } else {
- record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
- &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
- }
-}
-
-void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn) {
- int last_row = total_rows - 1;
- assert(start_row <= last_row, "must be work left to do");
- // Test this row for both the item and for null.
- // Take any of three different outcomes:
- // 1. found item => increment count and goto done
- // 2. found null => keep looking for case 1, maybe allocate this cell
- // 3. found something else => keep looking for cases 1 and 2
- // Case 3 is handled by a recursive call.
- for (int row = start_row; row <= last_row; row++) {
- Label next_test;
- bool test_for_null_also = (row == start_row);
-
- // See if the item is item[n].
- int item_offset = in_bytes(item_offset_fn(row));
- test_mdp_data_at(mdp, item_offset, item,
- (test_for_null_also ? reg2 : noreg),
- next_test);
- // (Reg2 now contains the item from the CallData.)
-
- // The item is item[n]. Increment count[n].
- int count_offset = in_bytes(item_count_offset_fn(row));
- increment_mdp_data_at(mdp, count_offset);
- b(done);
- bind(next_test);
-
- if (test_for_null_also) {
- Label found_null;
- // Failed the equality check on item[n]... Test for null.
- if (start_row == last_row) {
- // The only thing left to do is handle the null case.
- cbz(reg2, found_null);
- // Item did not match any saved item and there is no empty row for it.
- // Increment total counter to indicate polymorphic case.
- increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
- b(done);
- bind(found_null);
- break;
- }
- // Since null is rare, make it be the branch-taken case.
- cbz(reg2, found_null);
-
- // Put all the "Case 3" tests here.
- record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
- item_offset_fn, item_count_offset_fn);
-
- // Found a null. Keep searching for a matching item,
- // but remember that this is an empty (unused) slot.
- bind(found_null);
- }
- }
-
- // In the fall-through case, we found no matching item, but we
- // observed the item[start_row] is null.
-
- // Fill in the item field and increment the count.
- int item_offset = in_bytes(item_offset_fn(start_row));
- set_mdp_data_at(mdp, item_offset, item);
- int count_offset = in_bytes(item_count_offset_fn(start_row));
- mov(reg2, DataLayout::counter_increment);
- set_mdp_data_at(mdp, count_offset, reg2);
- if (start_row > 0) {
- b(done);
- }
-}
-
-// Example state machine code for three profile rows:
-// // main copy of decision tree, rooted at row[1]
-// if (row[0].rec == rec) { row[0].incr(); goto done; }
-// if (row[0].rec != nullptr) {
-// // inner copy of decision tree, rooted at row[1]
-// if (row[1].rec == rec) { row[1].incr(); goto done; }
-// if (row[1].rec != nullptr) {
-// // degenerate decision tree, rooted at row[2]
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
-// row[2].init(rec); goto done;
-// } else {
-// // remember row[1] is empty
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// row[1].init(rec); goto done;
-// }
-// } else {
-// // remember row[0] is empty
-// if (row[1].rec == rec) { row[1].incr(); goto done; }
-// if (row[2].rec == rec) { row[2].incr(); goto done; }
-// row[0].init(rec); goto done;
-// }
-// done:
-
-void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
- Register mdp, Register reg2) {
- assert(ProfileInterpreter, "must be profiling");
- Label done;
-
- record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
-
- bind (done);
-}
-
void InterpreterMacroAssembler::profile_ret(Register return_bci,
Register mdp) {
if (ProfileInterpreter) {
@@ -1200,7 +1073,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
}
}
-void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
+void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1213,7 +1086,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
- record_klass_in_profile(klass, mdp, reg2);
+ profile_receiver_type(klass, mdp, 0);
}
update_mdp_by_constant(mdp, mdp_delta);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index 2b230a3b73e..74d4430000d 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -273,15 +273,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register test_value_out,
Label& not_equal_continue);
- void record_klass_in_profile(Register receiver, Register mdp,
- Register reg2);
- void record_klass_in_profile_helper(Register receiver, Register mdp,
- Register reg2, int start_row,
- Label& done);
- void record_item_in_profile_helper(Register item, Register mdp,
- Register reg2, int start_row, Label& done, int total_rows,
- OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn);
-
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
@@ -295,11 +286,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
- Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
- void profile_typecheck(Register mdp, Register klass, Register scratch);
+ void profile_typecheck(Register mdp, Register klass);
void profile_typecheck_failed(Register mdp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index b8a9afc123f..f8b5a6f825c 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -2118,6 +2118,161 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
}
}
+// Handle the receiver type profile update given the "recv" klass.
+//
+// Normally updates the ReceiverTypeData (RTD) that starts at "mdp" + "mdp_offset".
+// If there are no matching or claimable receiver entries in RTD, updates
+// the polymorphic counter.
+//
+// This code is expected to run from either the interpreter or JIT-ed code, without
+// extra synchronization. For safety, receiver cells are claimed atomically, which
+// avoids grossly misrepresenting the profiles under concurrent updates. For speed,
+// counter updates are not atomic.
+//
+void MacroAssembler::profile_receiver_type(Register recv, Register mdp, int mdp_offset) {
+ assert_different_registers(recv, mdp, rscratch1, rscratch2);
+
+ int base_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(0));
+ int end_receiver_offset = in_bytes(ReceiverTypeData::receiver_offset(ReceiverTypeData::row_limit()));
+ int poly_count_offset = in_bytes(CounterData::count_offset());
+ int receiver_step = in_bytes(ReceiverTypeData::receiver_offset(1)) - base_receiver_offset;
+ int receiver_to_count_step = in_bytes(ReceiverTypeData::receiver_count_offset(0)) - base_receiver_offset;
+
+ // Adjust for MDP offsets.
+ base_receiver_offset += mdp_offset;
+ end_receiver_offset += mdp_offset;
+ poly_count_offset += mdp_offset;
+
+#ifdef ASSERT
+ // We are about to walk the MDO slots without asking for offsets.
+ // Check that our math hits all the right spots.
+ for (uint c = 0; c < ReceiverTypeData::row_limit(); c++) {
+ int real_recv_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_offset(c));
+ int real_count_offset = mdp_offset + in_bytes(ReceiverTypeData::receiver_count_offset(c));
+ int offset = base_receiver_offset + receiver_step*c;
+ int count_offset = offset + receiver_to_count_step;
+ assert(offset == real_recv_offset, "receiver slot math");
+ assert(count_offset == real_count_offset, "receiver count math");
+ }
+ int real_poly_count_offset = mdp_offset + in_bytes(CounterData::count_offset());
+ assert(poly_count_offset == real_poly_count_offset, "poly counter math");
+#endif
+
+ // Corner case: no profile table. Increment poly counter and exit.
+ if (ReceiverTypeData::row_limit() == 0) {
+ increment(Address(mdp, poly_count_offset), DataLayout::counter_increment);
+ return;
+ }
+
+ Register offset = rscratch2;
+
+ Label L_loop_search_receiver, L_loop_search_empty;
+ Label L_restart, L_found_recv, L_found_empty, L_polymorphic, L_count_update;
+
+ // The code here recognizes three major cases:
+ // A. Fastest: receiver found in the table
+ // B. Fast: no receiver in the table, and the table is full
+ // C. Slow: no receiver in the table, free slots in the table
+ //
+ // The case A performance is most important, as perfectly-behaved code would end up
+ // there, especially with larger TypeProfileWidth. The case B performance is
+ // important as well; this is where the bulk of the code would land for normally
+ // megamorphic cases. The case C performance is not essential: its job is to deal
+ // with installation races, so we optimize for code density instead. Case C needs to
+ // make sure that receiver rows are only claimed once. This ensures we never overwrite
+ // a row for another receiver and never duplicate receivers in the list, keeping the
+ // profile type-accurate.
+ //
+ // It is very tempting to handle these cases in a single loop, and claim the first slot
+ // without checking the rest of the table. But, profiling code should tolerate free slots
+ // in the table, as class unloading can clear them. After such cleanup, the receiver
+ // we need might be _after_ the free slot. Therefore, we need to let at least a full
+ // scan complete before trying to install new slots. Splitting the code into several
+ // tight loops also helpfully optimizes for cases A and B.
+ //
+ // This code is effectively:
+ //
+ // restart:
+ // // Fastest: receiver is already installed
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == recv) goto found_recv(i);
+ // }
+ //
+ // // Fast: no receiver, but profile is full
+ // for (i = 0; i < receiver_count(); i++) {
+ // if (receiver(i) == null) goto found_null(i);
+ // }
+ // goto polymorphic
+ //
+ // // Slow: try to install receiver
+ // found_null(i):
+ // CAS(&receiver(i), null, recv);
+ // goto restart
+ //
+ // polymorphic:
+ // count++;
+ // return
+ //
+ // found_recv(i):
+ // *receiver_count(i)++
+ //
+
+ bind(L_restart);
+
+ // Fastest: receiver is already installed
+ mov(offset, base_receiver_offset);
+ bind(L_loop_search_receiver);
+ ldr(rscratch1, Address(mdp, offset));
+ cmp(rscratch1, recv);
+ br(Assembler::EQ, L_found_recv);
+ add(offset, offset, receiver_step);
+ sub(rscratch1, offset, end_receiver_offset);
+ cbnz(rscratch1, L_loop_search_receiver);
+
+ // Fast: no receiver, but profile is full
+ mov(offset, base_receiver_offset);
+ bind(L_loop_search_empty);
+ ldr(rscratch1, Address(mdp, offset));
+ cbz(rscratch1, L_found_empty);
+ add(offset, offset, receiver_step);
+ sub(rscratch1, offset, end_receiver_offset);
+ cbnz(rscratch1, L_loop_search_empty);
+ b(L_polymorphic);
+
+ // Slow: try to install receiver
+ bind(L_found_empty);
+
+ // Atomically swing receiver slot: null -> recv.
+ //
+ // The update uses CAS, which clobbers rscratch1. Therefore, rscratch2
+ // is used to hold the destination address. This is safe because the
+ // offset is no longer needed after the address is computed.
+
+ lea(rscratch2, Address(mdp, offset));
+ cmpxchg(/*addr*/ rscratch2, /*expected*/ zr, /*new*/ recv, Assembler::xword,
+ /*acquire*/ false, /*release*/ false, /*weak*/ true, noreg);
+
+ // CAS success means the slot now has the receiver we want. CAS failure means
+ // something had claimed the slot concurrently: it can be the same receiver we want,
+ // or something else. Since this is a slow path, we can optimize for code density,
+ // and just restart the search from the beginning.
+ b(L_restart);
+
+ // Counter updates:
+
+ // Increment polymorphic counter instead of receiver slot.
+ bind(L_polymorphic);
+ mov(offset, poly_count_offset);
+ b(L_count_update);
+
+ // Found a receiver, convert its slot offset to corresponding count offset.
+ bind(L_found_recv);
+ add(offset, offset, receiver_to_count_step);
+
+ bind(L_count_update);
+ increment(Address(mdp, offset), DataLayout::counter_increment);
+}
+
+
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
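
The new profile_receiver_type above scans for the receiver, then for an empty row, and claims an empty row with a weak CAS, restarting on any race; counter bumps stay non-atomic. A rough Java model of that claiming discipline is sketched below, with AtomicReferenceArray standing in for the MDO receiver slots; it is an illustration of the algorithm, not the generated code.

```java
import java.util.concurrent.atomic.AtomicReferenceArray;

// High-level model of the claiming discipline only; the real code works on raw
// MDO slots and keeps the counter increments deliberately non-atomic.
final class ReceiverProfileSketch {
    private final AtomicReferenceArray<Class<?>> receivers;
    private final long[] counts;          // non-atomic, like the real counters
    private long polymorphicCount;

    ReceiverProfileSketch(int rowLimit) {
        receivers = new AtomicReferenceArray<>(rowLimit);
        counts = new long[rowLimit];
    }

    void record(Class<?> recv) {
        while (true) {
            // Fastest: receiver already installed somewhere in the table.
            for (int i = 0; i < receivers.length(); i++) {
                if (receivers.get(i) == recv) { counts[i]++; return; }
            }
            // Fast: table is full and the receiver is not there -> polymorphic.
            int empty = -1;
            for (int i = 0; i < receivers.length(); i++) {
                if (receivers.get(i) == null) { empty = i; break; }
            }
            if (empty < 0) { polymorphicCount++; return; }
            // Slow: try to claim the empty row; on any race, rescan from the start,
            // so a row is never overwritten and a receiver is never duplicated.
            receivers.compareAndSet(empty, null, recv);
        }
    }
}
```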
@@ -5606,12 +5761,11 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
- CardTable::CardValue* byte_map_base =
- ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
- // Strictly speaking the byte_map_base isn't an address at all, and it might
+ // Strictly speaking the card table base isn't an address at all, and it might
// even be negative. It is thus materialised as a constant.
- mov(reg, (uint64_t)byte_map_base);
+ mov(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {
@@ -5782,6 +5936,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -5848,6 +6005,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 4baa07d7d49..7eb6cea0614 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1122,6 +1122,8 @@ public:
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+ void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
+
void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
if (UseSVE > 0) {
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 89ae6bc10e0..73b631029a0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
- __ andsw(zr, rscratch1, JVM_ACC_STATIC);
- __ br(Assembler::EQ, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
+ __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+ __ br(Assembler::EQ, L_skip_barrier); // non-static
- __ load_method_holder(rscratch2, rmethod);
- __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(rscratch2, rmethod);
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
index 7e2f333ba40..db653bcf236 100644
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -6081,14 +6081,18 @@ class StubGenerator: public StubCodeGenerator {
// static int implKyber12To16(
// byte[] condensed, int index, short[] parsed, int parsedLength) {}
//
- // (parsedLength or (parsedLength - 48) must be divisible by 64.)
+ // we assume that parsed and condensed are allocated such that for
+ // n = (parsedLength + 63) / 64
+ // n blocks of 96 bytes of input can be processed, i.e.
+ // index + n * 96 <= condensed.length and
+ // n * 64 <= parsed.length
//
// condensed (byte[]) = c_rarg0
// condensedIndex = c_rarg1
- // parsed (short[112 or 256]) = c_rarg2
- // parsedLength (112 or 256) = c_rarg3
+ // parsed (short[]) = c_rarg2
+ // parsedLength = c_rarg3
address generate_kyber12To16() {
- Label L_F00, L_loop, L_end;
+ Label L_F00, L_loop;
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_kyber12To16_id;
@@ -6209,75 +6213,8 @@ class StubGenerator: public StubCodeGenerator {
vs_st2_post(vs_front(vb), __ T8H, parsed);
__ sub(parsedLength, parsedLength, 64);
- __ cmp(parsedLength, (u1)64);
- __ br(Assembler::GE, L_loop);
- __ cbz(parsedLength, L_end);
-
- // if anything is left it should be a final 72 bytes of input
- // i.e. a final 48 12-bit values. so we handle this by loading
- // 48 bytes into all 16B lanes of front(vin) and only 24
- // bytes into the lower 8B lane of back(vin)
- vs_ld3_post(vs_front(vin), __ T16B, condensed);
- vs_ld3(vs_back(vin), __ T8B, condensed);
-
- // Expand vin[0] into va[0:1], and vin[1] into va[2:3] and va[4:5]
- // n.b. target elements 2 and 3 of va duplicate elements 4 and
- // 5 and target element 2 of vb duplicates element 4.
- __ ushll(va[0], __ T8H, vin[0], __ T8B, 0);
- __ ushll2(va[1], __ T8H, vin[0], __ T16B, 0);
- __ ushll(va[2], __ T8H, vin[1], __ T8B, 0);
- __ ushll2(va[3], __ T8H, vin[1], __ T16B, 0);
- __ ushll(va[4], __ T8H, vin[1], __ T8B, 0);
- __ ushll2(va[5], __ T8H, vin[1], __ T16B, 0);
-
- // This time expand just the lower 8 lanes
- __ ushll(vb[0], __ T8H, vin[3], __ T8B, 0);
- __ ushll(vb[2], __ T8H, vin[4], __ T8B, 0);
- __ ushll(vb[4], __ T8H, vin[4], __ T8B, 0);
-
- // shift lo byte of copy 1 of the middle stripe into the high byte
- __ shl(va[2], __ T8H, va[2], 8);
- __ shl(va[3], __ T8H, va[3], 8);
- __ shl(vb[2], __ T8H, vb[2], 8);
-
- // expand vin[2] into va[6:7] and lower 8 lanes of vin[5] into
- // vb[6] pre-shifted by 4 to ensure top bits of the input 12-bit
- // int are in bit positions [4..11].
- __ ushll(va[6], __ T8H, vin[2], __ T8B, 4);
- __ ushll2(va[7], __ T8H, vin[2], __ T16B, 4);
- __ ushll(vb[6], __ T8H, vin[5], __ T8B, 4);
-
- // mask hi 4 bits of each 1st 12-bit int in pair from copy1 and
- // shift lo 4 bits of each 2nd 12-bit int in pair to bottom of
- // copy2
- __ andr(va[2], __ T16B, va[2], v31);
- __ andr(va[3], __ T16B, va[3], v31);
- __ ushr(va[4], __ T8H, va[4], 4);
- __ ushr(va[5], __ T8H, va[5], 4);
- __ andr(vb[2], __ T16B, vb[2], v31);
- __ ushr(vb[4], __ T8H, vb[4], 4);
-
-
-
- // sum hi 4 bits and lo 8 bits of each 1st 12-bit int in pair and
- // hi 8 bits plus lo 4 bits of each 2nd 12-bit int in pair
-
- // n.b. ordering ensures: i) inputs are consumed before they are
- // overwritten ii) order of 16-bit results across successive
- // pairs of vectors in va and then lower half of vb reflects order
- // of corresponding 12-bit inputs
- __ addv(va[0], __ T8H, va[0], va[2]);
- __ addv(va[2], __ T8H, va[1], va[3]);
- __ addv(va[1], __ T8H, va[4], va[6]);
- __ addv(va[3], __ T8H, va[5], va[7]);
- __ addv(vb[0], __ T8H, vb[0], vb[2]);
- __ addv(vb[1], __ T8H, vb[4], vb[6]);
-
- // store 48 results interleaved as shorts
- vs_st2_post(vs_front(va), __ T8H, parsed);
- vs_st2_post(vs_front(vs_front(vb)), __ T8H, parsed);
-
- __ BIND(L_end);
+ __ cmp(parsedLength, (u1)0);
+ __ br(Assembler::GT, L_loop);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ mov(r0, zr); // return 0
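
The relaxed length contract in the kyber12To16 hunk above implies a simple caller-side bound: with n = (parsedLength + 63) / 64 blocks, the stub reads n * 96 input bytes and writes n * 64 shorts. A hedged Java sketch of that check follows; the class, method, and parameter names are placeholders, not the real intrinsic entry point.

```java
// Placeholder bound check mirroring the arithmetic in the new header comment.
final class Kyber12To16Bounds {
    static void checkBounds(byte[] condensed, int index, short[] parsed, int parsedLength) {
        int n = (parsedLength + 63) / 64;        // number of 64-short output blocks
        if (index + n * 96 > condensed.length) { // each block consumes 96 input bytes
            throw new IllegalArgumentException("condensed too short");
        }
        if (n * 64 > parsed.length) {            // each block produces 64 shorts
            throw new IllegalArgumentException("parsed too short");
        }
    }
}
```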
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index cde142b39ac..5d4f7103a84 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
@@ -3369,7 +3370,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ load_klass(r0, recv);
// profile this call
- __ profile_virtual_call(r0, rlocals, r3);
+ __ profile_virtual_call(r0, rlocals);
// get target Method & entry point
__ lookup_virtual_method(r0, index, method);
@@ -3499,7 +3500,7 @@ void TemplateTable::invokeinterface(int byte_no) {
/*return_method=*/false);
// profile this call
- __ profile_virtual_call(r3, r13, r19);
+ __ profile_virtual_call(r3, r13);
// Get declaring interface class from method, and itable index
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 659c231464a..3fa85f8f47d 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -201,16 +201,14 @@ void VM_Version::initialize() {
}
}
- // Cortex A53
- if (_cpu == CPU_ARM && model_is(0xd03)) {
+ if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A53)) {
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
}
- // Cortex A73
- if (_cpu == CPU_ARM && model_is(0xd09)) {
+ if (_cpu == CPU_ARM && model_is(CPU_MODEL_ARM_CORTEX_A73)) {
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
}
@@ -220,16 +218,11 @@ void VM_Version::initialize() {
}
}
- // Neoverse
- // N1: 0xd0c
- // N2: 0xd49
- // N3: 0xd8e
- // V1: 0xd40
- // V2: 0xd4f
- // V3: 0xd84
- if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
- model_is(0xd40) || model_is(0xd4f) ||
- model_is(0xd8e) || model_is(0xd84))) {
+ if (_cpu == CPU_ARM &&
+ model_is_in({ CPU_MODEL_ARM_NEOVERSE_N1, CPU_MODEL_ARM_NEOVERSE_V1,
+ CPU_MODEL_ARM_NEOVERSE_N2, CPU_MODEL_ARM_NEOVERSE_V2,
+ CPU_MODEL_ARM_NEOVERSE_N3, CPU_MODEL_ARM_NEOVERSE_V3,
+ CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@@ -261,12 +254,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32, false);
}
- // Neoverse
- // V1: 0xd40
- // V2: 0xd4f
- // V3: 0xd84
if (_cpu == CPU_ARM &&
- (model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
+ model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
+ CPU_MODEL_ARM_NEOVERSE_V3, CPU_MODEL_ARM_NEOVERSE_V3AE })) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 17087d243d3..38b112d9936 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -30,6 +30,8 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
+#include <initializer_list>
+
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
@@ -112,14 +114,26 @@ public:
CPU_APPLE = 'a',
};
-enum Ampere_CPU_Model {
+ enum Ampere_CPU_Model {
CPU_MODEL_EMAG = 0x0, /* CPU implementer is CPU_AMCC */
CPU_MODEL_ALTRA = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_ALTRAMAX = 0xd0c, /* CPU implementer is CPU_ARM, Neoverse N1 */
CPU_MODEL_AMPERE_1 = 0xac3, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1A = 0xac4, /* CPU implementer is CPU_AMPERE */
CPU_MODEL_AMPERE_1B = 0xac5 /* AMPERE_1B core Implements ARMv8.7 with CSSC, MTE, SM3/SM4 extensions */
-};
+ };
+
+ enum ARM_CPU_Model {
+ CPU_MODEL_ARM_CORTEX_A53 = 0xd03,
+ CPU_MODEL_ARM_CORTEX_A73 = 0xd09,
+ CPU_MODEL_ARM_NEOVERSE_N1 = 0xd0c,
+ CPU_MODEL_ARM_NEOVERSE_V1 = 0xd40,
+ CPU_MODEL_ARM_NEOVERSE_N2 = 0xd49,
+ CPU_MODEL_ARM_NEOVERSE_V2 = 0xd4f,
+ CPU_MODEL_ARM_NEOVERSE_V3AE = 0xd83,
+ CPU_MODEL_ARM_NEOVERSE_V3 = 0xd84,
+ CPU_MODEL_ARM_NEOVERSE_N3 = 0xd8e,
+ };
#define CPU_FEATURE_FLAGS(decl) \
decl(FP, fp, 0) \
@@ -181,6 +195,15 @@ enum Ampere_CPU_Model {
return _model == cpu_model || _model2 == cpu_model;
}
+ static bool model_is_in(std::initializer_list<int> cpu_models) {
+ for (const int& cpu_model : cpu_models) {
+ if (_model == cpu_model || _model2 == cpu_model) {
+ return true;
+ }
+ }
+ return false;
+ }
+
static bool is_zva_enabled() { return 0 <= _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");
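The model_is_in() helper added above replaces chains of model_is() calls with a single brace-enclosed list. A minimal standalone sketch of the same membership-check pattern, using illustrative model numbers and stand-in globals rather than the real VM_Version fields:

#include <cstdio>
#include <initializer_list>

// Stand-ins for the cached MIDR model fields; values are examples only.
static int _model  = 0xd49;   // e.g. Neoverse N2
static int _model2 = 0;

static bool model_is(int cpu_model) {
  return _model == cpu_model || _model2 == cpu_model;
}

// Same shape as the new helper: true if either cached model matches
// any entry of the initializer list.
static bool model_is_in(std::initializer_list<int> cpu_models) {
  for (int cpu_model : cpu_models) {
    if (model_is(cpu_model)) {
      return true;
    }
  }
  return false;
}

int main() {
  // Reads like the call sites in vm_version_aarch64.cpp.
  if (model_is_in({ 0xd0c /* N1 */, 0xd49 /* N2 */, 0xd8e /* N3 */ })) {
    std::puts("Neoverse N-series defaults apply");
  }
  return 0;
}

The list form keeps the call sites close to the named-constant table above and avoids repeating the _model/_model2 comparison once per model number.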
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index 7a23296a3d4..f791fae7bd7 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
- if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+ if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
- if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+ if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
diff --git a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
index 2427d46cafa..5d63035ac69 100644
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
@@ -67,9 +67,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
- BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_cardtable_loop, L_done;
@@ -83,7 +81,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
- __ mov_address(tmp, (address) ct->byte_map_base());
+ __ mov_address(tmp, (address)ctbs->card_table_base_const());
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@@ -122,8 +120,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Load card table base address.
@@ -140,7 +137,7 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
- __ mov_address(card_table_base, (address)ct->byte_map_base());
+ __ mov_address(card_table_base, (address)ctbs->card_table_base_const());
}
// The 2nd part of the store check.
@@ -170,8 +167,8 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
- if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
+
+ if ((((uintptr_t)ctbs->card_table_base_const() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
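Each of the hunks above fetches the card table base through the new card_table_base_const() accessor instead of dereferencing the CardTable object. As a reminder of what the emitted code computes, here is a hedged C++ sketch of the card-marking arithmetic; the shift, dirty value, heap base, and table size are illustrative constants, not the real CardTable configuration, and the real barriers use a biased byte_map_base so that addr >> shift indexes the table directly instead of subtracting a heap base:

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr int      kCardShift = 9;   // illustrative: 512-byte cards
constexpr uint8_t  kDirty     = 0;   // illustrative dirty_card_val()

static uint8_t          cards[1 << 12];       // toy card table
static const uintptr_t  heap_base = 0x10000000;

// Single-store post barrier: dirty the card covering store_addr.
inline void dirty_card(uintptr_t store_addr) {
  cards[(store_addr - heap_base) >> kCardShift] = kDirty;
}

// Array post barrier: dirty every card covered by count oop slots,
// which is what gen_write_ref_array_post_barrier loops over.
inline void dirty_cards(uintptr_t addr, size_t count) {
  uintptr_t first = (addr - heap_base) >> kCardShift;
  uintptr_t last  = (addr + count * sizeof(void*) - 1 - heap_base) >> kCardShift;
  std::memset(cards + first, kDirty, last - first + 1);
}

int main() {
  dirty_card(heap_base + 1000);
  dirty_cards(heap_base + 4096, 16);
  return 0;
}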
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 232294b246a..df780ac31a6 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
- if(pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
- if (pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 82385bf0244..2b52db89285 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
- void set_data(intptr_t x, address pc = 0);
+ void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp
index 15e38411482..23775a3a52e 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp
@@ -568,6 +568,9 @@ class Assembler : public AbstractAssembler {
XSCVDPHP_OPCODE= (60u << OPCODE_SHIFT | 347u << 2 | 17u << 16), // XX2-FORM
XXPERM_OPCODE = (60u << OPCODE_SHIFT | 26u << 3),
XXSEL_OPCODE = (60u << OPCODE_SHIFT | 3u << 4),
+ XSCMPEQDP_OPCODE=(60u << OPCODE_SHIFT | 3u << 3),
+ XSCMPGEDP_OPCODE=(60u << OPCODE_SHIFT | 19u << 3),
+ XSCMPGTDP_OPCODE=(60u << OPCODE_SHIFT | 11u << 3),
XXSPLTIB_OPCODE= (60u << OPCODE_SHIFT | 360u << 1),
XVDIVDP_OPCODE = (60u << OPCODE_SHIFT | 120u << 3),
XVABSSP_OPCODE = (60u << OPCODE_SHIFT | 409u << 2),
@@ -2424,6 +2427,9 @@ class Assembler : public AbstractAssembler {
inline void xscvdphp( VectorSRegister d, VectorSRegister b);
inline void xxland( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c);
+ inline void xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
+ inline void xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
+ inline void xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b); // Requires Power9
inline void xxspltib( VectorSRegister d, int ui8);
inline void xvdivsp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xvdivdp( VectorSRegister d, VectorSRegister a, VectorSRegister b);
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index 7e49ec7455d..4cda782067e 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -923,6 +923,10 @@ inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSReg
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxsel( VectorSRegister d, VectorSRegister a, VectorSRegister b, VectorSRegister c) { emit_int32( XXSEL_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsrc(c)); }
+inline void Assembler::xscmpeqdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPEQDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+inline void Assembler::xscmpgedp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGEDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+inline void Assembler::xscmpgtdp(VectorSRegister t, VectorSRegister a, VectorSRegister b) { emit_int32( XSCMPGTDP_OPCODE | vsrt(t) | vsra(a) | vsrb(b) );}
+
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
index edf348fdc50..73b6b132895 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
@@ -664,3 +664,37 @@ void C2_MacroAssembler::reduceI(int opcode, Register dst, Register iSrc, VectorR
fn_scalar_op(opcode, dst, iSrc, R0); // dst <- op(iSrc, R0)
}
+// Works for single and double precision floats.
+// dst = (op1 cmp(cc) op2) ? src1 : src2;
+// Unordered semantics are the same as for CmpF3Node/CmpD3Node which implement the fcmpl/dcmpl bytecodes.
+// Comparing unordered values has the same result as when op1 is less than op2.
+// So dst = src1 for <, <=, != and dst = src2 for >, >=, ==.
+void C2_MacroAssembler::cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
+ VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp) {
+ // See operand cmpOp() for details.
+ bool invert_cond = (cc & 8) == 0; // invert reflects bcondCRbiIs0
+ auto cmp = (Assembler::Condition)(cc & 3);
+
+ switch(cmp) {
+ case Assembler::Condition::equal:
+ // Use false_result if "unordered".
+ xscmpeqdp(tmp, op1, op2);
+ break;
+ case Assembler::Condition::greater:
+ // Use false_result if "unordered".
+ xscmpgtdp(tmp, op1, op2);
+ break;
+ case Assembler::Condition::less:
+ // Use true_result if "unordered".
+ xscmpgedp(tmp, op1, op2);
+ invert_cond = !invert_cond;
+ break;
+ default:
+ assert(false, "unsupported compare condition: %d", cc);
+ ShouldNotReachHere();
+ }
+
+ VectorSRegister true_result = invert_cond ? src2 : src1;
+ VectorSRegister false_result = invert_cond ? src1 : src2;
+ xxsel(dst, false_result, true_result, tmp);
+}
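The unordered-handling rules in the comment above can be checked against a plain C++ model of the selection. This is only an illustration of the fcmpl/dcmpl convention the node implements; the enum and helper below are not HotSpot types:

#include <cmath>
#include <cstdio>

enum class Cond { eq, ne, lt, le, gt, ge };

// dst = (op1 cc op2) ? src1 : src2, with fcmpl semantics:
// an unordered compare (any NaN operand) collapses to -1, i.e. "less".
double cmov_fcmpl(Cond cc, double op1, double op2, double src1, double src2) {
  int c = (std::isnan(op1) || std::isnan(op2)) ? -1
        : (op1 < op2) ? -1 : (op1 > op2) ? 1 : 0;
  bool take_true = false;
  switch (cc) {
    case Cond::eq: take_true = (c == 0); break;
    case Cond::ne: take_true = (c != 0); break;
    case Cond::lt: take_true = (c <  0); break;
    case Cond::le: take_true = (c <= 0); break;
    case Cond::gt: take_true = (c >  0); break;
    case Cond::ge: take_true = (c >= 0); break;
  }
  return take_true ? src1 : src2;
}

int main() {
  double nan = std::nan("");
  // NaN involved: '>' is false, so the false-path value wins...
  std::printf("%g\n", cmov_fcmpl(Cond::gt, nan, 1.0, 10.0, 20.0)); // 20
  // ...while '<' is true, so the true-path value wins.
  std::printf("%g\n", cmov_fcmpl(Cond::lt, nan, 1.0, 10.0, 20.0)); // 10
  return 0;
}

With a NaN operand only the <, <=, and != conditions select src1, matching the fcmpl result of -1 described in the comment.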
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
index 5a114294c1f..e0dffec8396 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
@@ -74,5 +74,7 @@
void count_positives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
void reduceI(int opcode, Register dst, Register iSrc, VectorRegister vSrc, VectorRegister vTmp1, VectorRegister vTmp2);
+ void cmovF(int cc, VectorSRegister dst, VectorSRegister op1, VectorSRegister op2,
+ VectorSRegister src1, VectorSRegister src2, VectorSRegister tmp);
#endif // CPU_PPC_C2_MACROASSEMBLER_PPC_HPP
diff --git a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
index 7404f7e2e5c..297ce57a394 100644
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
@@ -103,8 +103,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@@ -117,7 +116,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ srdi(addr, addr, CardTable::card_shift());
__ srdi(count, count, CardTable::card_shift());
__ subf(count, addr, count);
- __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
+ __ add_const_optimized(addr, addr, (address)ctbs->card_table_base_const(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
@@ -140,8 +139,8 @@ void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
}
void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp) {
- CardTableBarrierSet* bs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- card_table_write(masm, bs->card_table()->byte_map_base(), tmp, store_addr);
+ CardTableBarrierSet* bs = CardTableBarrierSet::barrier_set();
+ card_table_write(masm, bs->card_table_base_const(), tmp, store_addr);
}
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
index 1f1bc7622ed..c3bb1811031 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp
@@ -50,14 +50,14 @@
#define __ masm->
-void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level) {
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler *masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp1, Register tmp2, Register tmp3,
+ MacroAssembler::PreservationLevel preservation_level) {
if (ShenandoahSATBBarrier) {
- __ block_comment("satb_write_barrier (shenandoahgc) {");
- satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
- __ block_comment("} satb_write_barrier (shenandoahgc)");
+ __ block_comment("satb_barrier (shenandoahgc) {");
+ satb_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
+ __ block_comment("} satb_barrier (shenandoahgc)");
}
}
@@ -198,11 +198,12 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
// In "load mode", this register acts as a temporary register and must
// thus not be 'noreg'. In "preloaded mode", its content will be sustained.
// tmp1/tmp2: Temporary registers, one of which must be non-volatile in "preloaded mode".
-void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
- Register base, RegisterOrConstant ind_or_offs,
- Register pre_val,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level) {
+void ShenandoahBarrierSetAssembler::satb_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register pre_val,
+ Register tmp1, Register tmp2,
+ MacroAssembler::PreservationLevel preservation_level) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
assert_different_registers(tmp1, tmp2, pre_val, noreg);
Label skip_barrier;
@@ -574,13 +575,13 @@ void ShenandoahBarrierSetAssembler::load_at(
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
if (ShenandoahSATBBarrier) {
__ block_comment("keep_alive_barrier (shenandoahgc) {");
- satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
+ satb_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
__ block_comment("} keep_alive_barrier (shenandoahgc)");
}
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
assert_different_registers(base, tmp, R0);
@@ -603,21 +604,33 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
- if (is_reference_type(type)) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
- }
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
+ BarrierSetAssembler::store_at(masm, decorators, type,
+ base, ind_or_offs,
+ val,
+ tmp1, tmp2, tmp3,
+ preservation_level);
+ return;
}
+ bool storing_non_null = (val != noreg);
+
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type,
base, ind_or_offs,
val,
tmp1, tmp2, tmp3,
preservation_level);
- // No need for post barrier if storing null
- if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
- store_check(masm, base, ind_or_offs, tmp1);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, base, ind_or_offs, tmp1);
}
}
@@ -771,9 +784,6 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
-
- ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
- CardTable* ct = bs->card_table();
assert_different_registers(addr, count, R0);
Label L_skip_loop, L_store_loop;
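The renamed barriers and the numbered comments above fix the ordering for reference stores: SATB pre-barrier, then the store, then the card post-barrier. A hedged, GC-agnostic model of that ordering (step 1, the plain path for non-reference types, is omitted; the predicates and the queue/card helpers are stand-ins, not ShenandoahBarrierSet's real API):

#include <cstddef>

static void satb_enqueue(void* previous_value) { (void)previous_value; }
static void dirty_card(void** slot)            { (void)slot; }

// Models the restructured store_at() for reference fields.
void store_reference(void** slot, void* new_value,
                     bool need_satb_barrier, bool need_card_barrier) {
  // 2: pre-barrier -- SATB must see the value being overwritten.
  if (need_satb_barrier) {
    satb_enqueue(*slot);
  }

  // Store!
  *slot = new_value;

  // 3: post-barrier -- the card barrier needs the store address,
  //    and a null store cannot create a cross-region reference.
  if (need_card_barrier && new_value != nullptr) {
    dirty_card(slot);
  }
}

int main() {
  void* field = nullptr;
  int obj = 0;
  store_reference(&field, &obj, /*need_satb_barrier=*/true, /*need_card_barrier=*/true);
  return 0;
}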
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
index b058dcf1a2e..52615a740af 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp
@@ -45,15 +45,15 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
/* ==== Actual barrier implementations ==== */
- void satb_write_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
- Register base, RegisterOrConstant ind_or_offs,
- Register pre_val,
- Register tmp1, Register tmp2,
- MacroAssembler::PreservationLevel preservation_level);
+ void satb_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register pre_val,
+ Register tmp1, Register tmp2,
+ MacroAssembler::PreservationLevel preservation_level);
- void store_check(MacroAssembler* masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp);
+ void card_barrier(MacroAssembler* masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp);
void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
@@ -85,10 +85,10 @@ public:
#endif
/* ==== Available barriers (facades of the actual implementations) ==== */
- void satb_write_barrier(MacroAssembler* masm,
- Register base, RegisterOrConstant ind_or_offs,
- Register tmp1, Register tmp2, Register tmp3,
- MacroAssembler::PreservationLevel preservation_level);
+ void satb_barrier(MacroAssembler* masm,
+ Register base, RegisterOrConstant ind_or_offs,
+ Register tmp1, Register tmp2, Register tmp3,
+ MacroAssembler::PreservationLevel preservation_level);
void load_reference_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs,
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index fc865be015e..f7bf457f72c 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
- add(R11_scratch1, R12_scratch2, R12_scratch2);
+ add(R11_scratch1, R11_scratch1, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
- call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif
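The single-register change above is a correctness fix: the expected bcp is const_method + codes_offset + bci, but the old add produced R12 + R12 and discarded the bci and codes_offset terms already accumulated in R11. A hedged C model of the check (the struct layouts and names are illustrative, not the real Method/MethodData classes):

#include <cassert>
#include <cstdint>

struct ConstMethod { uint8_t codes[64]; };   // bytecodes live here (illustrative)
struct MethodData  { uint16_t bci; };        // bci recorded at the current mdp

// What the fixed sequence verifies: the mdp's bci, rebased onto the method's
// bytecodes, must point at the interpreter's current bcp.
void verify_mdp(const ConstMethod* cm, const MethodData* mdx, const uint8_t* bcp) {
  const uint8_t* expected_bcp = cm->codes + mdx->bci;  // R11 = R11 + R12 after the fix
  // The old "add R11, R12, R12" effectively compared against twice the
  // ConstMethod address, so the check could never match a valid frame.
  assert(expected_bcp == bcp);
}

int main() {
  ConstMethod cm{};
  MethodData  md{7};
  verify_mdp(&cm, &md, cm.codes + 7);
  return 0;
}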
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 5649ead2ea8..809285afddb 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ blt(CR0, done); // if (SP < _cont_fastpath) goto done;
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
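The ble-to-blt change only matters on the pop side. Modeling the two comparisons exactly as the new comments state them (a toy thread field, stack growing toward lower addresses):

#include <cstdint>

struct ThreadModel { uintptr_t cont_fastpath = 0; };

// if (SP <= _cont_fastpath) goto done;  -- only raise the recorded value.
void push_cont_fastpath(ThreadModel& t, uintptr_t sp) {
  if (sp <= t.cont_fastpath) return;
  t.cont_fastpath = sp;
}

// if (SP < _cont_fastpath) goto done;   -- after the fix, SP == recorded value
// also clears the field, i.e. popping the frame that set it resets it.
void pop_cont_fastpath(ThreadModel& t, uintptr_t sp) {
  if (sp < t.cont_fastpath) return;
  t.cont_fastpath = 0;
}

int main() {
  ThreadModel t;
  push_cont_fastpath(t, 0x7000);   // records 0x7000
  pop_cont_fastpath(t, 0x7000);    // old ble kept the stale value; blt clears it
  return (t.cont_fastpath == 0) ? 0 : 1;
}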
diff --git a/src/hotspot/cpu/ppc/matcher_ppc.hpp b/src/hotspot/cpu/ppc/matcher_ppc.hpp
index aad41fb7b1c..b50de6323de 100644
--- a/src/hotspot/cpu/ppc/matcher_ppc.hpp
+++ b/src/hotspot/cpu/ppc/matcher_ppc.hpp
@@ -64,12 +64,10 @@
return true;
}
- // Use conditional move (CMOVL) on Power7.
static constexpr int long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
- // Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
- // fsel doesn't accept a condition register as input, so this would be slightly different.
- static int float_cmove_cost() { return ConditionalMoveLimit; }
+ // Suppress CMOVF for Power8 because there are no fast nodes.
+ static int float_cmove_cost() { return (PowerArchitecturePPC64 >= 9) ? 0 : ConditionalMoveLimit; }
// This affects two different things:
// - how Decode nodes are matched
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 2a0a9149bb3..d926fabd353 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -3024,7 +3024,6 @@ encode %{
%}
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
- // use isel instruction with Power 7
cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node();
encodeP_subNode *n_sub_base = new encodeP_subNode();
encodeP_shiftNode *n_shift = new encodeP_shiftNode();
@@ -3099,7 +3098,6 @@ encode %{
n_shift->_opnds[1] = op_src;
n_shift->_bottom_type = _bottom_type;
- // use isel instruction with Power 7
decodeN_addNode *n_add_base = new decodeN_addNode();
n_add_base->add_req(n_region, n_shift);
n_add_base->_opnds[0] = op_dst;
@@ -6618,7 +6616,6 @@ instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
ins_pipe(pipe_class_default);
%}
-// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
@@ -7293,7 +7290,6 @@ instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -7313,7 +7309,6 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -7326,6 +7321,70 @@ instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
+instruct cmovF_cmpF(cmpOp cop, regF op1, regF op2, regF dst, regF false_result, regF true_result, regD tmp) %{
+ match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovF_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovF_cmpD(cmpOp cop, regD op1, regD op2, regF dst, regF false_result, regF true_result, regD tmp) %{
+ match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovF_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovD_cmpD(cmpOp cop, regD op1, regD op2, regD dst, regD false_result, regD true_result, regD tmp) %{
+ match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovD_cmpD $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct cmovD_cmpF(cmpOp cop, regF op1, regF op2, regD dst, regD false_result, regD true_result, regD tmp) %{
+ match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary false_result true_result)));
+ predicate(PowerArchitecturePPC64 >= 9);
+ effect(TEMP tmp);
+ ins_cost(2*DEFAULT_COST);
+ format %{ "cmovD_cmpF $dst = ($op1 $cop $op2) ? $true_result : $false_result\n\t" %}
+ size(8);
+ ins_encode %{
+ __ cmovF($cop$$cmpcode, $dst$$FloatRegister->to_vsr(),
+ $op1$$FloatRegister->to_vsr(), $op2$$FloatRegister->to_vsr(),
+ $true_result$$FloatRegister->to_vsr(), $false_result$$FloatRegister->to_vsr(),
+ $tmp$$FloatRegister->to_vsr());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
//----------Compare-And-Swap---------------------------------------------------
// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
@@ -8492,7 +8551,6 @@ instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -8551,7 +8609,6 @@ instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
ins_variable_size_depending_on_alignment(true);
format %{ "CMOVE $dst, neg($src1), $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode %{
Label done;
@@ -10262,7 +10319,6 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10276,7 +10332,6 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10439,7 +10494,6 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -10453,7 +10507,6 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@@ -11080,7 +11133,6 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
- // Worst case is branch + move + stop, no stop without scheduler.
size(12);
ins_encode %{
Label done;
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 4e427ace404..4eb2028f529 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
- __ andi_(R0, R0, JVM_ACC_STATIC);
- __ beq(CR0, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+ __ andi_(R0, R0, JVM_ACC_STATIC);
+ __ beq(CR0, L_skip_barrier); // non-static
- Register klass = R11_scratch1;
- __ load_method_holder(klass, R19_method);
- __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = R11_scratch1;
+ __ load_method_holder(klass, R19_method);
+ __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
- __ mtctr(klass);
- __ bctr();
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
+ __ mtctr(klass);
+ __ bctr();
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)
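The same reshaping recurs in the ports below: the runtime supports_fast_class_init_checks() guard becomes an assert, presumably because the platforms in this series always report it as true, and the barrier block loses one nesting level. In outline form, with stand-ins for the real queries and emitters:

#include <cassert>

static bool supports_fast_class_init_checks() { return true; }
static bool needs_clinit_barrier()            { return true; }
static void emit_clinit_barrier()             { /* ... */ }

void generate_entry() {
  // Before: if (supports_fast_class_init_checks() && needs_clinit_barrier()) { ... }
  // After: the capability is asserted; only the per-method condition is tested.
  if (needs_clinit_barrier()) {
    assert(supports_fast_class_init_checks() && "sanity");
    emit_clinit_barrier();
  }
}

int main() { generate_entry(); return 0; }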
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 8d61ba1b2d7..8a3af748fa1 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;
@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
index dd6c8556307..3cbbb783258 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
@@ -88,26 +88,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
- }
-}
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -376,21 +366,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
__ push_call_clobbered_registers();
- satb_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- xthread /* thread */,
- tmp1 /* tmp1 */,
- tmp2 /* tmp2 */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ xthread /* thread */,
+ tmp1 /* tmp1 */,
+ tmp2 /* tmp2 */,
+ true /* tosca_live */,
+ true /* expand_call */);
__ pop_call_clobbered_registers();
__ leave();
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
- assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
+ assert(ShenandoahCardBarrier, "Should have been checked by caller");
__ srli(obj, obj, CardTable::card_shift());
@@ -413,13 +403,13 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- if (!on_oop) {
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
return;
}
- // flatten object address if needed
+ // Flatten object address right away for simplicity: likely needed by barriers
if (dst.offset() == 0) {
if (dst.base() != tmp3) {
__ mv(tmp3, dst.base());
@@ -428,20 +418,26 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
__ la(tmp3, dst);
}
- shenandoah_write_barrier_pre(masm,
- tmp3 /* obj */,
- tmp2 /* pre_val */,
- xthread /* thread */,
- tmp1 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
+ bool storing_non_null = (val != noreg);
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp3 /* obj */,
+ tmp2 /* pre_val */,
+ xthread /* thread */,
+ tmp1 /* tmp */,
+ t0 /* tmp2 */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
- if (needs_post_barrier) {
- store_check(masm, tmp3);
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp3);
}
}
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
index c8a7c35fb83..5085be26b2e 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp
@@ -41,23 +41,16 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp1,
- Register tmp2,
- bool tosca_live,
- bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp1,
+ Register tmp2,
+ bool tosca_live,
+ bool expand_call);
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 43b17a13c20..fb30f64e9ed 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -5110,9 +5110,8 @@ void MacroAssembler::get_thread(Register thread) {
}
void MacroAssembler::load_byte_map_base(Register reg) {
- CardTable::CardValue* byte_map_base =
- ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
- mv(reg, (uint64_t)byte_map_base);
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ mv(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::build_frame(int framesize) {
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index eeb6fad1b59..8c343f6ab2b 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
- return UseRVV;
+ return UseRVV && size > 0;
}
// ---------------------------------------------------------------------------
@@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
- __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
- __ beqz(t1, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
+ __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
+ __ beqz(t1, L_skip_barrier); // non-static
- __ load_method_holder(t1, xmethod);
- __ clinit_barrier(t1, t0, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(t1, xmethod);
+ __ clinit_barrier(t1, t0, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index ca41583e4bc..0fb529d1683 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -708,7 +708,6 @@ void TemplateTable::index_check(Register array, Register index) {
__ mv(x11, index);
}
Label ok;
- __ sext(index, index, 32);
__ bltu(index, length, ok);
__ mv(x13, array);
__ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
@@ -1052,7 +1051,7 @@ void TemplateTable::aastore() {
transition(vtos, vtos);
// stack: ..., array, index, value
__ ld(x10, at_tos()); // value
- __ ld(x12, at_tos_p1()); // index
+ __ lw(x12, at_tos_p1()); // index
__ ld(x13, at_tos_p2()); // array
index_check(x13, x12); // kills x11
@@ -1462,9 +1461,9 @@ void TemplateTable::iinc() {
transition(vtos, vtos);
__ load_signed_byte(x11, at_bcp(2)); // get constant
locals_index(x12);
- __ ld(x10, iaddress(x12, x10, _masm));
+ __ lw(x10, iaddress(x12, x10, _masm));
__ addw(x10, x10, x11);
- __ sd(x10, iaddress(x12, t0, _masm));
+ __ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::wide_iinc() {
@@ -1477,9 +1476,9 @@ void TemplateTable::wide_iinc() {
__ orr(x11, x11, t1);
locals_index_wide(x12);
- __ ld(x10, iaddress(x12, t0, _masm));
+ __ lw(x10, iaddress(x12, t0, _masm));
__ addw(x10, x10, x11);
- __ sd(x10, iaddress(x12, t0, _masm));
+ __ sw(x10, iaddress(x12, t0, _masm));
}
void TemplateTable::convert() {
@@ -2192,7 +2191,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2243,8 +2243,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);
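In the iinc/wide_iinc and aastore hunks above, the accesses narrow from 64-bit ld/sd to 32-bit lw/sw, matching the width of a jint value. A hedged illustration of the lw/sw behaviour being relied on, sign-extended 32-bit reads and 32-bit writes, using plain integer operations rather than the interpreter's real frame layout:

#include <cstdint>
#include <cstdio>

// RV64 lw: load 32 bits and sign-extend to 64.
int64_t rv_lw(uint64_t slot) {
  return static_cast<int32_t>(static_cast<uint32_t>(slot));
}

// RV64 sw: store only the low 32 bits of the value.
void rv_sw(uint64_t& slot, int64_t value) {
  slot = (slot & ~uint64_t{0xFFFFFFFFu}) | static_cast<uint32_t>(value);
}

int main() {
  // A slot whose upper half still holds unrelated bits; low word is -5 as a jint.
  uint64_t slot = 0xDEADBEEFFFFFFFFBull;
  int64_t v = rv_lw(slot);                 // -5, upper bits ignored
  v += 1;                                  // iinc increment (addw in the template)
  rv_sw(slot, v);
  std::printf("int local after iinc: %lld\n", (long long)rv_lw(slot));  // -4
  return 0;
}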
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index 22f19c4f5ea..36f0864da0b 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
(unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
}
- if (FLAG_IS_DEFAULT(AlignVector)) {
- FLAG_SET_DEFAULT(AlignVector,
- unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
- }
-
#ifdef __riscv_ztso
// Hotspot is compiled with TSO support, it will only run on hardware which
// supports Ztso
@@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
}
}
+ if (FLAG_IS_DEFAULT(AlignVector)) {
+ FLAG_SET_DEFAULT(AlignVector,
+ unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
+ }
+
// NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
// as there are extra checks inside it which could disable UseRVV
// in some situations.
diff --git a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
index a0da6ebe682..9bb7f63ff31 100644
--- a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp
@@ -83,8 +83,7 @@ void CardTableBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Registe
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@@ -105,7 +104,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
- __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
+ __ load_const_optimized(Z_R1, (address)ctbs->card_table_base_const());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift());
@@ -179,13 +178,12 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register store_addr, Register tmp) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
assert_different_registers(store_addr, tmp);
__ z_srlg(store_addr, store_addr, CardTable::card_shift());
- __ load_absolute_address(tmp, (address)ct->byte_map_base());
+ __ load_absolute_address(tmp, (address)ctbs->card_table_base_const());
__ z_agr(store_addr, tmp);
__ z_mvi(0, store_addr, CardTable::dirty_card_val());
}
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 5b6f7dcd984..00a830a80cd 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
- __ z_bfalse(L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
+ __ z_bfalse(L_skip_barrier); // non-static
- Register klass = Z_R11;
- __ load_method_holder(klass, Z_method);
- __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = Z_R11;
+ __ load_method_holder(klass, Z_method);
+ __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
- __ z_br(klass);
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
+ __ z_br(klass);
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 4e8fdf275e4..647915ef4fa 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
index 0951bda0d17..77a149addb5 100644
--- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
+++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
+ // When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
+
if (sizeKnown) {
- __ movl(temp2, 31 + size);
+ __ movl(temp2, (isU ? 30 : 31) + size);
} else {
- __ movl(temp2, 31);
+ __ movl(temp2, isU ? 30 : 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);
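The 30-versus-31 constant can be sanity-checked by replaying the index arithmetic from the comment. The snippet below only redoes that arithmetic with illustrative values of k (needle length) and n (haystack length); it is not the stub's register-level code:

#include <cassert>
#include <cstdio>

int main() {
  int k = 5;    // needle length (illustrative)
  int n = 20;   // haystack length (illustrative)

  // Latin-1: re-anchor the last 32-byte chunk haystack[(n-32)..(n-1)]
  // at element (k-1): x = (k-1+31) - (n-1) = k + 31 - n.
  int shift_latin1 = (k - 1 + 31) - (n - 1);
  assert(shift_latin1 == 31 + k - n);   // what movl(temp2, 31 + size) then subl encodes

  // UTF-16 (isU): the anchor element is one earlier, (k-2), giving k + 30 - n,
  // hence movl(temp2, 30 + size) before the same subl(temp2, hsLength).
  int shift_utf16 = (k - 2 + 31) - (n - 1);
  assert(shift_utf16 == 30 + k - n);

  std::printf("latin1 shift = %d, utf16 shift = %d\n", shift_latin1, shift_utf16);
  return 0;
}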
diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
index 2b91662ddb5..65e6b4e01fc 100644
--- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
@@ -95,11 +95,7 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
- BarrierSet *bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
- intptr_t disp = (intptr_t) ct->byte_map_base();
- SHENANDOAHGC_ONLY(assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");)
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
Label L_loop, L_done;
const Register end = count;
@@ -115,7 +111,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
- __ mov64(tmp, disp);
+ __ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@@ -128,10 +124,7 @@ __ BIND(L_done);
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
- BarrierSet* bs = BarrierSet::barrier_set();
-
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
__ shrptr(obj, CardTable::card_shift());
@@ -142,7 +135,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
- intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
+ intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {
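For context on what the new card_table_base_const() accessor feeds, this is the card-marking arithmetic the assembler emits (a minimal sketch; card_shift is typically 9 for 512-byte cards, and 0 is the dirty-card value, matching the movb of 0 in the loop above):

    #include <cstdint>
    // Sketch: dirty the card covering a stored-to heap address.
    static void mark_card(uint8_t* card_table_base, uintptr_t store_addr, int card_shift) {
      card_table_base[store_addr >> card_shift] = 0;   // 0 == dirty card
    }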
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
index 9e321391f6c..97829a10a3b 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -174,24 +174,14 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
}
-void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
+void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call) {
+ assert(ShenandoahSATBBarrier, "Should be checked by caller");
- if (ShenandoahSATBBarrier) {
- satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
- }
-}
-
-void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
@@ -533,18 +523,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
assert_different_registers(dst, tmp1, r15_thread);
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
- shenandoah_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- tmp1 /* tmp */,
- true /* tosca_live */,
- true /* expand_call */);
+ satb_barrier(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ tmp1 /* tmp */,
+ true /* tosca_live */,
+ true /* expand_call */);
restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
}
}
-void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
+void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
assert(ShenandoahCardBarrier, "Should have been checked by caller");
// Does a store check for the oop in register obj. The content of
@@ -575,41 +565,40 @@ void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register o
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- bool on_oop = is_reference_type(type);
- bool in_heap = (decorators & IN_HEAP) != 0;
- bool as_normal = (decorators & AS_NORMAL) != 0;
- if (on_oop && in_heap) {
- bool needs_pre_barrier = as_normal;
+ // 1: non-reference types require no barriers
+ if (!is_reference_type(type)) {
+ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
+ return;
+ }
- // flatten object address if needed
- // We do it regardless of precise because we need the registers
- if (dst.index() == noreg && dst.disp() == 0) {
- if (dst.base() != tmp1) {
- __ movptr(tmp1, dst.base());
- }
- } else {
- __ lea(tmp1, dst);
- }
-
- assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
-
- if (needs_pre_barrier) {
- shenandoah_write_barrier_pre(masm /*masm*/,
- tmp1 /* obj */,
- tmp2 /* pre_val */,
- tmp3 /* tmp */,
- val != noreg /* tosca_live */,
- false /* expand_call */);
- }
-
- BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
- if (val != noreg) {
- if (ShenandoahCardBarrier) {
- store_check(masm, tmp1);
- }
+ // Flatten object address right away for simplicity: likely needed by barriers
+ assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
+ if (dst.index() == noreg && dst.disp() == 0) {
+ if (dst.base() != tmp1) {
+ __ movptr(tmp1, dst.base());
}
} else {
- BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
+ __ lea(tmp1, dst);
+ }
+
+ bool storing_non_null = (val != noreg);
+
+ // 2: pre-barrier: SATB needs the previous value
+ if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
+ satb_barrier(masm,
+ tmp1 /* obj */,
+ tmp2 /* pre_val */,
+ tmp3 /* tmp */,
+ storing_non_null /* tosca_live */,
+ false /* expand_call */);
+ }
+
+ // Store!
+ BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
+
+ // 3: post-barrier: card barrier needs store address
+ if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
+ card_barrier(masm, tmp1);
}
}
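The restructured store_at above reads as a straight-line recipe; a hedged pseudo-C++ sketch of the same ordering (the helper names below are hypothetical stand-ins, not the assembler's routines):

    // Hypothetical helpers standing in for the generated barrier code:
    static bool need_satb_barrier();
    static bool need_card_barrier();
    static void satb_log_previous(void* old_val);
    static void dirty_card_for(void* slot);

    template <typename Oop>
    static void shenandoah_store_sketch(Oop** slot, Oop* new_val) {
      if (need_satb_barrier()) satb_log_previous(*slot);   // 2: SATB wants the previous value
      *slot = new_val;                                     // the store itself
      if (need_card_barrier() && new_val != nullptr)
        dirty_card_for(slot);                              // 3: card barrier wants the store address
    }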
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
index b0185f2dbff..b5cc5c8d834 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
@@ -41,21 +41,14 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- void satb_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call);
+ void satb_barrier(MacroAssembler* masm,
+ Register obj,
+ Register pre_val,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call);
- void shenandoah_write_barrier_pre(MacroAssembler* masm,
- Register obj,
- Register pre_val,
- Register tmp,
- bool tosca_live,
- bool expand_call);
-
- void store_check(MacroAssembler* masm, Register obj);
+ void card_barrier(MacroAssembler* masm, Register obj);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count,
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index be7deb884ce..7f7bb2c4c7f 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -6086,7 +6086,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
subptr(count, 16 << shift);
- jccb(Assembler::less, L_check_fill_32_bytes);
+ jcc(Assembler::less, L_check_fill_32_bytes);
align(16);
BIND(L_fill_64_bytes_loop_avx3);
diff --git a/src/hotspot/cpu/x86/register_x86.cpp b/src/hotspot/cpu/x86/register_x86.cpp
index e6083429344..188af9264a8 100644
--- a/src/hotspot/cpu/x86/register_x86.cpp
+++ b/src/hotspot/cpu/x86/register_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_
const char * Register::RegisterImpl::name() const {
static const char *const names[number_of_registers] = {
-#ifdef _LP64
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
-#else
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-#endif // _LP64
};
return is_valid() ? names[encoding()] : "noreg";
}
@@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
const char* XMMRegister::XMMRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-#ifdef _LP64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
-#endif // _LP64
};
return is_valid() ? names[encoding()] : "xnoreg";
}
diff --git a/src/hotspot/cpu/x86/register_x86.hpp b/src/hotspot/cpu/x86/register_x86.hpp
index 0a8ecb7be26..5e0326b9aad 100644
--- a/src/hotspot/cpu/x86/register_x86.hpp
+++ b/src/hotspot/cpu/x86/register_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
-// The implementation of integer registers for the x86/x64 architectures.
+// The implementation of integer registers for the x64 architecture.
class Register {
private:
int _encoding;
@@ -44,11 +44,9 @@ private:
public:
inline friend constexpr Register as_Register(int encoding);
- enum {
- number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
- number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
- max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
- };
+ static const int number_of_registers = 32;
+ static const int number_of_byte_registers = 32;
+ static const int max_slots_per_register = 2;
class RegisterImpl: public AbstractRegisterImpl {
friend class Register;
@@ -79,11 +77,9 @@ public:
// Actually available GP registers for use, depending on actual CPU capabilities and flags.
static int available_gp_registers() {
-#ifdef _LP64
if (!UseAPX) {
return number_of_registers / 2;
}
-#endif // _LP64
return number_of_registers;
}
};
@@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
constexpr Register rbp = as_Register(5);
constexpr Register rsi = as_Register(6);
constexpr Register rdi = as_Register(7);
-#ifdef _LP64
-constexpr Register r8 = as_Register( 8);
-constexpr Register r9 = as_Register( 9);
+constexpr Register r8 = as_Register(8);
+constexpr Register r9 = as_Register(9);
constexpr Register r10 = as_Register(10);
constexpr Register r11 = as_Register(11);
constexpr Register r12 = as_Register(12);
@@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
constexpr Register r29 = as_Register(29);
constexpr Register r30 = as_Register(30);
constexpr Register r31 = as_Register(31);
-#endif // _LP64
// The implementation of x87 floating point registers for the ia32 architecture.
@@ -154,10 +148,8 @@ private:
public:
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
- enum {
- number_of_registers = 8,
- max_slots_per_register = 2
- };
+ static const int number_of_registers = 8;
+ static const int max_slots_per_register = 2;
class FloatRegisterImpl: public AbstractRegisterImpl {
friend class FloatRegister;
@@ -217,10 +209,8 @@ private:
public:
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
- enum {
- number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
- max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
- };
+ static const int number_of_registers = 32;
+ static const int max_slots_per_register = 16; // 512-bit
class XMMRegisterImpl: public AbstractRegisterImpl {
friend class XMMRegister;
@@ -250,11 +240,9 @@ public:
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {
-#ifdef _LP64
if (UseAVX < 3) {
return number_of_registers / 2;
}
-#endif // _LP64
return number_of_registers;
}
};
@@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
-#ifdef _LP64
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
constexpr XMMRegister xmm10 = as_XMMRegister(10);
@@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
constexpr XMMRegister xmm29 = as_XMMRegister(29);
constexpr XMMRegister xmm30 = as_XMMRegister(30);
constexpr XMMRegister xmm31 = as_XMMRegister(31);
-#endif // _LP64
// The implementation of AVX-512 opmask registers.
@@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
- enum {
- max_gpr = Register::number_of_registers * Register::max_slots_per_register,
- max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
- max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
- max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
+ static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
+ static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
+ static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
+ static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
-
- // x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
- // REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
- // with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
- // added for 32 bit jvm.
- number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
- NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
- 1 // eflags
- };
+ static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
+ 1; // eflags
};
template <>
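The net effect of the constants above on register availability, as a tiny sketch (UseAPX is the real flag; the function name is illustrative): without APX only the classic 16 GPRs are handed out, with APX the EGPRs r16-r31 bring the total to 32.

    // Sketch of what available_gp_registers() evaluates to after the change.
    static int available_gp_registers_sketch(bool use_apx) {
      const int number_of_registers = 32;
      return use_apx ? number_of_registers : number_of_registers / 2;   // 32 vs 16
    }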
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 5a4a5b1809e..bbd43c1a0e8 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
- Register method = rbx;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
+ Register method = rbx;
- { // Bypass the barrier for non-static methods
- Register flags = rscratch1;
- __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
- __ testl(flags, JVM_ACC_STATIC);
- __ jcc(Assembler::zero, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ Register flags = rscratch1;
+ __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
+ __ testl(flags, JVM_ACC_STATIC);
+ __ jcc(Assembler::zero, L_skip_barrier); // non-static
- Register klass = rscratch1;
- __ load_method_holder(klass, method);
- __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
+ Register klass = rscratch1;
+ __ load_method_holder(klass, method);
+ __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
- __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR
-
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
index 3e5593322d5..7d5dee6a5df 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+// 0 - 63
+ 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+ 45, 46, 46, 47
+ };
+
+static address kyberAvx512_12To16DupAddr() {
+ return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+// 0 - 31
+ 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+ 4, 0, 4, 0, 4, 0, 4
+ };
+
+static address kyberAvx512_12To16ShiftAddr() {
+ return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+// 0 - 7
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+ };
+
+static address kyberAvx512_12To16AndAddr() {
+ return (address) kyberAvx512_12To16And;
+}
+
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
- Label Loop;
+ Label Loop, VBMILoop;
__ addptr(condensed, condensedOffs);
+ if (VM_Version::supports_avx512_vbmi()) {
+ // mask load for the first 48 bytes of each vector
+ __ mov64(rax, 0x0000FFFFFFFFFFFF);
+ __ kmovql(k1, rax);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+ __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+ __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+ __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(VBMILoop);
+
+ __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+ Assembler::AVX_512bit);
+
+ __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+ __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+ __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+ __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+ __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+ __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+ __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+ __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+ __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+ store4regs(parsed, 0, xmm0_3, _masm);
+
+ __ addptr(condensed, 192);
+ __ addptr(parsed, 256);
+ __ subl(parsedLength, 128);
+ __ jcc(Assembler::greater, VBMILoop);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ mov64(rax, 0); // return 0
+ __ ret(0);
+
+ return start;
+ }
+
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);
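The new VBMI path packs the whole 12-bit-to-16-bit expansion into a byte permute, a per-word variable shift, and a mask. A scalar equivalent of one iteration's work (my sketch, not the stub), with two coefficients per three condensed bytes:

    #include <cstdint>
    // Sketch: unpack pairs of 12-bit ML-KEM coefficients from packed bytes.
    static void kyber_12to16_scalar(const uint8_t* condensed, uint16_t* parsed, int pairs) {
      for (int i = 0; i < pairs; i++) {
        const uint8_t b0 = condensed[3 * i], b1 = condensed[3 * i + 1], b2 = condensed[3 * i + 2];
        parsed[2 * i]     = (uint16_t)((b0 | (b1 << 8)) & 0x0FFF);         // shift 0, then mask
        parsed[2 * i + 1] = (uint16_t)(((b1 | (b2 << 8)) >> 4) & 0x0FFF);  // shift 4, then mask
      }
    }

The Dup table duplicates the middle byte of each triple so adjacent 16-bit lanes see (b0,b1) and (b1,b2), the Shift table alternates 0/4, and the And table applies the 0x0FFF mask, mirroring the three scalar steps above.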
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 42392b84833..db7749ec482 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;
@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 747daefd51d..74df41f8682 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,7 +143,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
- Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning;
+ Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@@ -468,6 +468,20 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 0), r16);
__ movq(Address(rsi, 8), r31);
+ //
+ // Query CPUID 0xD.19 for APX XSAVE offset
+ // Extended State Enumeration Sub-leaf 19 (APX)
+ // EAX = size of APX state (should be 128)
+ // EBX = offset in standard XSAVE format
+ //
+ __ movl(rax, 0xD);
+ __ movl(rcx, 19);
+ __ cpuid();
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
+ __ movl(Address(rsi, 0), rax);
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
+ __ movl(Address(rsi, 0), rbx);
+
UseAPX = save_apx;
__ bind(vector_save_restore);
//
@@ -921,8 +935,9 @@ void VM_Version::get_processor_features() {
// Check if processor has Intel Ecore
if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
- (_model == 0x97 || _model == 0xAA || _model == 0xAC || _model == 0xAF ||
- _model == 0xCC || _model == 0xDD)) {
+ (supports_hybrid() ||
+ _model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
+ _model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index cc93ee3564e..a3f2a801198 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -676,6 +676,10 @@ protected:
// Space to save apx registers after signal handle
jlong apx_save[2]; // Save r16 and r31
+ // cpuid function 0xD, subleaf 19 (APX extended state)
+ uint32_t apx_xstate_size; // EAX: size of APX state (128)
+ uint32_t apx_xstate_offset; // EBX: offset in standard XSAVE area
+
VM_Features feature_flags() const;
// Asserts
@@ -739,6 +743,11 @@ public:
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
static ByteSize apx_save_offset() { return byte_offset_of(CpuidInfo, apx_save); }
+ static ByteSize apx_xstate_offset_offset() { return byte_offset_of(CpuidInfo, apx_xstate_offset); }
+ static ByteSize apx_xstate_size_offset() { return byte_offset_of(CpuidInfo, apx_xstate_size); }
+
+ static uint32_t apx_xstate_offset() { return _cpuid_info.apx_xstate_offset; }
+ static uint32_t apx_xstate_size() { return _cpuid_info.apx_xstate_size; }
// The value used to check ymm register after signal handle
static int ymm_test_value() { return 0xCAFEBABE; }
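Outside the stub, the same enumeration can be read with the compiler's cpuid intrinsics; a hedged user-space sketch (GCC/Clang <cpuid.h>, not HotSpot code):

    #include <cpuid.h>
    #include <cstdint>
    // Sketch: CPUID leaf 0xD, sub-leaf 19 describes the APX extended-state component.
    static bool query_apx_xstate(uint32_t* size, uint32_t* offset) {
      uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
      if (!__get_cpuid_count(0xD, 19, &eax, &ebx, &ecx, &edx)) return false;
      *size = eax;     // expected 128 bytes (16 extended GPRs x 8 bytes)
      *offset = ebx;   // offset of the component in the standard XSAVE layout
      return true;
    }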
diff --git a/src/hotspot/cpu/x86/vmreg_x86.cpp b/src/hotspot/cpu/x86/vmreg_x86.cpp
index 44aee56ef15..45269e69fcb 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.cpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
regName[i++] = reg->name();
-#ifdef AMD64
regName[i++] = reg->name();
-#endif // AMD64
reg = reg->successor();
}
diff --git a/src/hotspot/cpu/x86/vmreg_x86.hpp b/src/hotspot/cpu/x86/vmreg_x86.hpp
index 6f7c7fafb32..032ce654f2f 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.hpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,14 +52,8 @@ inline bool is_KRegister() {
}
inline Register as_Register() {
-
- assert( is_Register(), "must be");
- // Yuk
-#ifdef AMD64
+ assert(is_Register(), "must be");
return ::as_Register(value() >> 1);
-#else
- return ::as_Register(value());
-#endif // AMD64
}
inline FloatRegister as_FloatRegister() {
@@ -82,9 +76,6 @@ inline KRegister as_KRegister() {
inline bool is_concrete() {
assert(is_reg(), "must be");
-#ifndef AMD64
- if (is_Register()) return true;
-#endif // AMD64
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
int base = value() - ConcreteRegisterImpl::max_fpr;
diff --git a/src/hotspot/cpu/x86/vmreg_x86.inline.hpp b/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
index 1aeedc094fd..76d70900fe4 100644
--- a/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
+++ b/src/hotspot/cpu/x86/vmreg_x86.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define CPU_X86_VMREG_X86_INLINE_HPP
inline VMReg Register::RegisterImpl::as_VMReg() const {
- return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
+ return VMRegImpl::as_VMReg(encoding() << 1);
}
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
diff --git a/src/hotspot/os/aix/decoder_aix.hpp b/src/hotspot/os/aix/decoder_aix.hpp
index 2ba3e1c5a3a..632355ccf4e 100644
--- a/src/hotspot/os/aix/decoder_aix.hpp
+++ b/src/hotspot/os/aix/decoder_aix.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,7 +38,7 @@ class AIXDecoder: public AbstractDecoder {
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
- return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
+ return AixSymbols::get_function_name(addr, buf, buflen, offset, nullptr, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index f88729cdc66..0a8efbece8d 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Aix::physical_memory();
+}
+
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@@ -683,7 +703,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, kernel thread id: %zu).",
os::current_thread_id(), (uintx) kernel_thread_id);
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -2264,6 +2284,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;
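The pattern here, repeated for each platform below, is that the existing os::X() entry points now forward to a new os::Machine::X() layer that always reports host-level values; a minimal sketch of the layering (bodies illustrative, only the delegation shape is from the hunk):

    // Sketch: the generic entry point handles the user override, the Machine layer reports host state.
    static int active_processor_count_sketch(bool user_override, int override_value, int host_online_cpus) {
      if (user_override) return override_value;   // -XX:ActiveProcessorCount=N wins
      return host_online_cpus;                    // what os::Machine::active_processor_count() reports
    }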
diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp
index 7311afc197b..b3f878fbfdd 100644
--- a/src/hotspot/os/aix/porting_aix.cpp
+++ b/src/hotspot/os/aix/porting_aix.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ class fixed_strings {
public:
- fixed_strings() : first(0) {}
+ fixed_strings() : first(nullptr) {}
~fixed_strings() {
node* n = first;
while (n) {
@@ -113,7 +113,7 @@ bool AixSymbols::get_function_name (
// information (null if not available)
bool demangle // [in] whether to demangle the name
) {
- struct tbtable* tb = 0;
+ struct tbtable* tb = nullptr;
unsigned int searchcount = 0;
// initialize output parameters
@@ -653,10 +653,10 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
- codeptr_t cur_iar = 0;
- stackptr_t cur_sp = 0;
- codeptr_t cur_rtoc = 0;
- codeptr_t cur_lr = 0;
+ codeptr_t cur_iar = nullptr;
+ stackptr_t cur_sp = nullptr;
+ codeptr_t cur_rtoc = nullptr;
+ codeptr_t cur_lr = nullptr;
const ucontext_t* uc = (const ucontext_t*) context;
@@ -926,7 +926,7 @@ static struct handletableentry* p_handletable = nullptr;
static const char* rtv_linkedin_libpath() {
constexpr int bufsize = 4096;
static char buffer[bufsize];
- static const char* libpath = 0;
+ static const char* libpath = nullptr;
// we only try to retrieve the libpath once. After that try we
// let libpath point to buffer, which then contains a valid libpath
diff --git a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
index 6fd08d63e85..30e258c9d2c 100644
--- a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
+++ b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
@@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
- return 0;
+ return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 61de48bb7fa..0e21c2d1785 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
// Available here means free. Note that this number is of no much use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Bsd::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -1400,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
- if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+ if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}
@@ -2189,6 +2209,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
return _processor_count;
}
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
index f9c6f794ebd..e49d070890e 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
-bool CgroupSubsystem::active_processor_count(int& value) {
- int cpu_count;
- int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
- CachingCgroupController* contrl = cpu_controller();
- CachedMetric* cpu_limit = contrl->metrics_cache();
+ CachingCgroupController* contrl = cpu_controller();
+ CachedMetric* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
- value = (int)cpu_limit->value();
- log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+ value = cpu_limit->value();
+ log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
return true;
}
- cpu_count = os::Linux::active_processor_count();
+ int cpu_count = os::Linux::active_processor_count();
+ double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
- CachingCgroupController* contrl = memory_controller();
- CachedMetric* memory_limit = contrl->metrics_cache();
+ CachingCgroupController* contrl = memory_controller();
+ CachedMetric* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
index 522b64f8816..d083a9985c2 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,20 +181,21 @@ class CgroupController: public CHeapObj {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
+template
class CachedMetric : public CHeapObj{
private:
- volatile physical_memory_size_type _metric;
+ volatile MetricType _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
- _metric = value_unlimited;
+ _metric = static_cast(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
- physical_memory_size_type value() { return _metric; }
- void set_value(physical_memory_size_type value, jlong timeout) {
+ MetricType value() { return _metric; }
+ void set_value(MetricType value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj{
}
};
-template
+template
class CachingCgroupController : public CHeapObj {
private:
T* _controller;
- CachedMetric* _metrics_cache;
+ CachedMetric* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
- _metrics_cache = new CachedMetric();
+ _metrics_cache = new CachedMetric();
}
- CachedMetric* metrics_cache() { return _metrics_cache; }
+ CachedMetric* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
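CachedMetric is now generic over the metric's type so the CPU cache can hold a fractional value; the cache-with-timeout pattern itself is unchanged. A stand-alone sketch of that pattern (hypothetical names, std::chrono in place of os::elapsed_counter()):

    #include <chrono>
    // Sketch: re-read an expensive metric only after a short grace period expires.
    template <typename MetricType>
    class TimedCache {
      MetricType _value{};
      std::chrono::steady_clock::time_point _next_check{};
    public:
      bool should_check_metric() const { return std::chrono::steady_clock::now() > _next_check; }
      MetricType value() const { return _value; }
      void set_value(MetricType v, std::chrono::milliseconds grace) {
        _value = v;
        _next_check = std::chrono::steady_clock::now() + grace;
      }
    };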
@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj {
class CgroupSubsystem: public CHeapObj {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
- bool active_processor_count(int& value);
+ bool active_processor_count(double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
- virtual CachingCgroupController* memory_controller() = 0;
- virtual CachingCgroupController* cpu_controller() = 0;
+ virtual CachingCgroupController* memory_controller() = 0;
+ virtual CachingCgroupController* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp
index 7aa07d53148..570b335940b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.cpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
- int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
- int result = upper_bound;
+ double result = upper_bound;
- if (quota > -1 && period > 0) {
- quota_count = ceilf((float)quota / (float)period);
- log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+ if (quota > 0 && period > 0) { // Use quotas
+ double cpu_quota = static_cast(quota) / period;
+ log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+ result = MIN2(result, cpu_quota);
}
- // Use quotas
- if (quota_count != 0) {
- limit_count = quota_count;
- }
-
- result = MIN2(upper_bound, limit_count);
- log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+ log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
value = result;
return true;
}
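The switch from int to double keeps fractional quotas instead of rounding them up: with quota=50000 and period=100000 (microseconds) the limit is now reported as 0.5 CPUs, where the removed ceilf() path reported 1. A sketch of the new arithmetic:

    // Sketch of CgroupUtil::processor_count()'s computation after the change.
    static double cpu_limit_sketch(int quota, int period, int host_cpus) {
      double result = host_cpus;
      if (quota > 0 && period > 0) {
        const double cpu_quota = static_cast<double>(quota) / period;
        if (cpu_quota < result) result = cpu_quota;   // MIN2(result, cpu_quota)
      }
      return result;
    }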
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
- int cpu_limit_val = -1;
+ double cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
- int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+ double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp
index d72bbd1cf1e..1fd2a7d872b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.hpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
class CgroupUtil: AllStatic {
public:
- static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+ static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
- static int get_updated_cpu_limit(CgroupCpuController* c,
- int lowest,
- int upper_bound);
+ static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
index 2df604083d2..c8f5a290c99 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController(memory);
- _cpu = new CachingCgroupController(cpu);
+ _memory = new CachingCgroupController(memory);
+ _cpu = new CachingCgroupController(cpu);
}
bool CgroupV1Subsystem::is_containerized() {
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index f556bc57f26..af8d0efd378 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
- CachingCgroupController* memory_controller() override { return _memory; }
- CachingCgroupController* cpu_controller() override { return _cpu; }
+ CachingCgroupController* memory_controller() override { return _memory; }
+ CachingCgroupController* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
- CachingCgroupController* _memory = nullptr;
+ CachingCgroupController* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
- CachingCgroupController* _cpu = nullptr;
+ CachingCgroupController* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
index c61d30e9236..30e1affc646 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController(memory);
- _cpu = new CachingCgroupController(cpu);
+ _memory = new CachingCgroupController(memory);
+ _cpu = new CachingCgroupController(cpu);
_cpuacct = cpuacct;
}
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
index 39a4fabe9f6..998145c0ff9 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
- CachingCgroupController* _memory = nullptr;
- CachingCgroupController* _cpu = nullptr;
+ CachingCgroupController* _memory = nullptr;
+ CachingCgroupController* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
- CachingCgroupController* memory_controller() override { return _memory; }
- CachingCgroupController* cpu_controller() override { return _cpu; }
+ CachingCgroupController* memory_controller() override { return _memory; }
+ CachingCgroupController* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 25ffd0b8078..28159ae4801 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -66,9 +66,6 @@
#endif
// open(2) flags
-#ifndef O_CLOEXEC
-#define O_CLOEXEC 02000000
-#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 15a6403d07f..b46263efd99 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
- int host_cpus = os::Linux::active_processor_count();
- int cpus = host_cpus;
+ double host_cpus = os::Linux::active_processor_count();
+ double cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
-bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
- " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
- mem_swap_buf, mem_limit_buf, host_free_swap);
+ " container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
}
return false;
}
@@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
-bool OSContainer::active_processor_count(int& value) {
+bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@@ -291,11 +289,13 @@ template struct metric_fmt;
template<> struct metric_fmt { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%d"; };
+template<> struct metric_fmt { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, int, const char*);
+template void OSContainer::print_container_metric(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, const char*, const char*);
template
@@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
- st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
- if (unit[0] != '\0') {
- st->print_cr(" %s", unit);
- } else {
- st->print_cr("");
- }
+
+ const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
+ const char* unit_prefix = unit[0] != '\0' ? " " : "";
+
+ char line[128] = {};
+ os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
+ st->print_cr("%s", line);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
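As context for the reworked print_container_metric above, here is a minimal standalone sketch of the same single-buffer formatting idea (not part of the patch; max_length and the helper name are assumptions):

#include <cstdio>
#include <cstring>

// Right-align the value with a computed %*s width so an optional unit
// lands after exactly one space; the whole line goes through one snprintf.
static void print_metric(const char* metric, const char* value, const char* unit) {
  const int max_length = 36;                                   // assumed column width
  const int pad_width = max_length - (int)strlen(metric) - 2;  // -2 for the ": "
  const char* unit_prefix = unit[0] != '\0' ? " " : "";
  char line[128] = {};
  snprintf(line, sizeof(line), "%s: %*s%s%s", metric, pad_width, value, unit_prefix, unit);
  puts(line);
}

int main() {
  print_metric("active_processor_count", "2.50", "");
  print_metric("memory_limit_in_bytes", "536870912", "(512.00M)");
  return 0;
}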
diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
index 11c3e086feb..96b59b98db8 100644
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
- static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value);
+ static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@@ -84,7 +83,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
- static bool active_processor_count(int& value);
+ static bool active_processor_count(double& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 88e5e9b582a..7190845a8ba 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
+bool os::is_containerized() {
+ return OSContainer::is_containerized();
+}
+
+bool os::Container::memory_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::used_memory(physical_memory_size_type& value) {
+ return OSContainer::memory_usage_in_bytes(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::available_memory(value);
+}
+
+bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
+bool os::Container::available_memory(physical_memory_size_type& value) {
+ return OSContainer::available_memory_in_bytes(value);
+}
+
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::free_memory(value);
+}
+
+bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
- if (OSContainer::is_containerized()) {
- physical_memory_size_type mem_swap_limit = value_unlimited;
- physical_memory_size_type memory_limit = value_unlimited;
- if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
- OSContainer::memory_limit_in_bytes(memory_limit)) {
- if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
- mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
- value = mem_swap_limit - memory_limit;
- return true;
- }
- }
- } // fallback to the host swap space if the container returned unlimited
+ if (is_containerized() && Container::total_swap_space(value)) {
+ return true;
+ } // fall back to the host swap space if the container value is unavailable
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
+bool os::Container::total_swap_space(physical_memory_size_type& value) {
+ physical_memory_size_type mem_swap_limit = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ OSContainer::memory_limit_in_bytes(memory_limit)) {
+ if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
+ mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
+ value = mem_swap_limit - memory_limit;
+ return true;
+ }
+ }
+ return false;
+}
+
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
- if (OSContainer::is_containerized()) {
- if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+ if (is_containerized()) {
+ if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
+
value = host_free_swap_val;
return true;
}
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
+ return host_free_swap_f(value);
+}
+
+bool os::Container::free_swap_space(physical_memory_size_type& value) {
+ return OSContainer::available_swap_in_bytes(value);
+}
+
physical_memory_size_type os::physical_memory() {
- if (OSContainer::is_containerized()) {
+ if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
- if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+ if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
- physical_memory_size_type phys_mem = Linux::physical_memory();
+ physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Linux::physical_memory();
+}
+
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
- int i = -1;
- bool supported = OSContainer::active_processor_count(i);
+ double cpus = -1;
+ bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
- assert(i > 0, "must be");
+ assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
- OSContainer::print_container_metric(st, "active_processor_count", i);
+ OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
+ int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@@ -4737,15 +4807,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
- int active_cpus = -1;
- if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
- log_trace(os)("active_processor_count: determined by OSContainer: %d",
- active_cpus);
- } else {
- active_cpus = os::Linux::active_processor_count();
+ if (is_containerized()) {
+ double cpu_quota;
+ if (Container::processor_count(cpu_quota)) {
+ int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
+ assert(active_cpus <= Machine::active_processor_count(), "must be");
+ log_trace(os)("active_processor_count: determined by OSContainer: %d",
+ active_cpus);
+ return active_cpus;
+ }
}
- return active_cpus;
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
+ return os::Linux::active_processor_count();
+}
+
+bool os::Container::processor_count(double& value) {
+ return OSContainer::active_processor_count(value);
}
static bool should_warn_invalid_processor_id() {
@@ -4878,36 +4959,18 @@ int os::open(const char *path, int oflag, int mode) {
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
- // might fork and exec without closing all appropriate file descriptors,
- // and this in turn might:
- //
- // - cause end-of-file to fail to be detected on some file
- // descriptors, resulting in mysterious hangs, or
- //
- // - might cause an fopen in the subprocess to fail on a system
- // suffering from bug 1085341.
- //
- // (Yes, the default setting of the close-on-exec flag is a Unix
- // design flaw)
- //
- // See:
- // 1085341: 32-bit stdio routines should support file descriptors >255
- // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
- // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
- //
- // Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
- // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
- // because it saves a system call and removes a small window where the flag
- // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
- // and we fall back to using FD_CLOEXEC (see below).
-#ifdef O_CLOEXEC
+ // might fork and exec without closing all appropriate file descriptors.
oflag |= O_CLOEXEC;
-#endif
int fd = ::open(path, oflag, mode);
- if (fd == -1) return -1;
+ // No further checking is needed if open() returned an error or
+ // access mode is not read only.
+ if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
+ return fd;
+ }
- //If the open succeeded, the file might still be a directory
+ // If the open succeeded and is read only, the file might be a directory
+ // which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
@@ -4925,21 +4988,6 @@ int os::open(const char *path, int oflag, int mode) {
}
}
-#ifdef FD_CLOEXEC
- // Validate that the use of the O_CLOEXEC flag on open above worked.
- // With recent kernels, we will perform this check exactly once.
- static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
- if (!O_CLOEXEC_is_known_to_work) {
- int flags = ::fcntl(fd, F_GETFD);
- if (flags != -1) {
- if ((flags & FD_CLOEXEC) != 0)
- O_CLOEXEC_is_known_to_work = 1;
- else
- ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
- }
- }
-#endif
-
return fd;
}
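The os_linux.cpp changes above introduce a Container/Machine layering (consult the cgroup value first, fall back to the host value) and a fractional CPU quota that is rounded up when converted to a processor count. A hedged sketch of that shape outside HotSpot, with stubbed-out probes (read_cgroup_cpu_quota and host_cpu_count are invented names, not JDK APIs):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Stub probes standing in for the OSContainer / sysconf queries (assumptions).
static bool read_cgroup_cpu_quota(double& cpus) { cpus = 1.5; return true; }
static int  host_cpu_count()                    { return 8; }

static int active_processor_count(bool containerized) {
  if (containerized) {
    double cpu_quota;
    if (read_cgroup_cpu_quota(cpu_quota)) {
      // A fractional quota such as 1.5 CPUs is rounded up to 2,
      // but never reported above what the host actually has.
      int active_cpus = (int)std::ceil(cpu_quota);
      return std::min(active_cpus, host_cpu_count());
    }
  }
  return host_cpu_count();  // fall back to the machine value
}

int main() {
  std::printf("active processors: %d\n", active_processor_count(true));
  return 0;
}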
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 08a19270943..ce9c2a4f031 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
+ } else {
+ if (!successful_write) {
+ remove(destfile);
+ }
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
+ remove(filename);
break;
}
}
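Both perfMemory hunks above apply the same idea: if the perf-data or shared-memory file could not be written completely, remove it rather than leave a truncated file behind. A simplified sketch of that pattern with an invented helper name:

#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

// Write a buffer to 'path'; delete the file again if the write is incomplete.
static bool save_all_or_nothing(const char* path, const char* buf, size_t size) {
  int fd = ::open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600);
  if (fd == -1) {
    return false;
  }
  ssize_t written = ::write(fd, buf, size);
  bool ok = (written == (ssize_t)size);
  ::close(fd);
  if (!ok) {
    std::remove(path);  // don't leave a partially written file around
  }
  return ok;
}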
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index 85babea5603..203e13a46ac 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -951,6 +951,32 @@ struct enum_sigcode_desc_t {
const char* s_desc;
};
+#if defined(LINUX)
+// Additional kernel si_code definitions that are only exported by
+// more recent glibc distributions, so we have to hard-code the values.
+#ifndef BUS_MCEERR_AR // glibc 2.17
+#define BUS_MCEERR_AR 4
+#define BUS_MCEERR_AO 5
+#endif
+
+#ifndef SEGV_PKUERR // glibc 2.27
+#define SEGV_PKUERR 4
+#endif
+
+#ifndef SYS_SECCOMP // glibc 2.28
+#define SYS_SECCOMP 1
+#endif
+
+#ifndef TRAP_BRANCH // glibc 2.30
+#define TRAP_BRANCH 3
+#endif
+
+#ifndef TRAP_HWBKPT // not glibc version specific - gdb related
+#define TRAP_HWBKPT 4
+#endif
+
+#endif // LINUX
+
static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
const struct {
@@ -976,6 +1002,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
#if defined(LINUX)
{ SIGSEGV, SEGV_BNDERR, "SEGV_BNDERR", "Failed address bound checks." },
+ { SIGSEGV, SEGV_PKUERR, "SEGV_PKUERR", "Protection key checking failure." },
#endif
#if defined(AIX)
// no explanation found what keyerr would be
@@ -984,8 +1011,18 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
{ SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
{ SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
+#if defined(LINUX)
+ { SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","Hardware memory error consumed on a machine check: action required." },
+ { SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","Hardware memory error detected in process but not consumed: action optional." },
+
+ { SIGSYS, SYS_SECCOMP, "SYS_SECCOMP", "Secure computing (seccomp) filter failure." },
+#endif
{ SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
{ SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
+#if defined(LINUX)
+ { SIGTRAP, TRAP_BRANCH, "TRAP_BRANCH", "Process taken branch trap." },
+ { SIGTRAP, TRAP_HWBKPT, "TRAP_HWBKPT", "Hardware breakpoint/watchpoint." },
+#endif
{ SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
{ SIGCHLD, CLD_KILLED, "CLD_KILLED", "Child has terminated abnormally and did not create a core file." },
{ SIGCHLD, CLD_DUMPED, "CLD_DUMPED", "Child has terminated abnormally and created a core file." },
@@ -993,6 +1030,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGCHLD, CLD_STOPPED, "CLD_STOPPED", "Child has stopped." },
{ SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
#ifdef SIGPOLL
+ { SIGPOLL, POLL_IN, "POLL_IN", "Data input available." },
{ SIGPOLL, POLL_OUT, "POLL_OUT", "Output buffers available." },
{ SIGPOLL, POLL_MSG, "POLL_MSG", "Input message available." },
{ SIGPOLL, POLL_ERR, "POLL_ERR", "I/O error." },
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index efbd1fe7c68..b0b7ae18106 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
-bool os::total_swap_space(physical_memory_size_type& value) {
+bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return win32::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index afef21b091a..3ab81697280 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
-#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
-#else
- __mtfsfi(6, 0);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
index c741335b5f0..d9dac0e231f 100644
--- a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
+++ b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbt(((address)loc) +((long)interval));
-#endif
}
inline void Prefetch::write(void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbtst( ((address)loc) +((long)interval) );
-#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
diff --git a/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
index 3dc0035ed87..2b96e978980 100644
--- a/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/javaThread_linux_arm.cpp
@@ -42,8 +42,19 @@ frame JavaThread::pd_last_frame() {
void JavaThread::cache_global_variables() {
BarrierSet* bs = BarrierSet::barrier_set();
+#if INCLUDE_G1GC
+ if (bs->is_a(BarrierSet::G1BarrierSet)) {
+ _card_table_base = nullptr;
+ } else
+#endif
+#if INCLUDE_SHENANDOAHGC
+ if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
+ _card_table_base = nullptr;
+ } else
+#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
- _card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ _card_table_base = (address)ctbs->card_table_base_const();
} else {
_card_table_base = nullptr;
}
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index 07f53582a76..ee08738c678 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -52,6 +52,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
+#include "runtime/vm_version.hpp"
// put OS-includes here
# include
@@ -380,6 +381,43 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
+// XSAVE constants - from Intel SDM Vol. 1, Chapter 13
+#define XSAVE_HDR_OFFSET 512
+#define XFEATURE_APX (1ULL << 19)
+
+// XSAVE header structure
+// See: Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header"
+// Also: Linux kernel arch/x86/include/asm/fpu/types.h
+struct xstate_header {
+ uint64_t xfeatures;
+ uint64_t xcomp_bv;
+ uint64_t reserved[6];
+};
+
+// APX extended state - R16-R31 (16 x 64-bit registers)
+// See: Intel APX Architecture Specification
+struct apx_state {
+ uint64_t regs[16]; // r16-r31
+};
+
+static apx_state* get_apx_state(const ucontext_t* uc) {
+ uint32_t offset = VM_Version::apx_xstate_offset();
+ if (offset == 0 || uc->uc_mcontext.fpregs == nullptr) {
+ return nullptr;
+ }
+
+ char* xsave = (char*)uc->uc_mcontext.fpregs;
+ xstate_header* hdr = (xstate_header*)(xsave + XSAVE_HDR_OFFSET);
+
+ // Check if APX state is present in this context
+ if (!(hdr->xfeatures & XFEATURE_APX)) {
+ return nullptr;
+ }
+
+ return (apx_state*)(xsave + offset);
+}
+
+
void os::print_context(outputStream *st, const void *context) {
if (context == nullptr) return;
@@ -406,6 +444,14 @@ void os::print_context(outputStream *st, const void *context) {
st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
st->cr();
+ // Dump APX EGPRs (R16-R31)
+ apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
+ if (apx != nullptr) {
+ for (int i = 0; i < 16; i++) {
+ st->print("%sR%d=" INTPTR_FORMAT, (i % 4 == 0) ? "" : ", ", 16 + i, (intptr_t)apx->regs[i]);
+ if (i % 4 == 3) st->cr();
+ }
+ }
st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
@@ -432,37 +478,50 @@ void os::print_context(outputStream *st, const void *context) {
}
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
- const int register_count = 16;
+ if (context == nullptr) {
+ return;
+ }
+ const ucontext_t *uc = (const ucontext_t*)context;
+ apx_state* apx = UseAPX ? get_apx_state(uc) : nullptr;
+
+ const int register_count = 16 + (apx != nullptr ? 16 : 0);
int n = continuation;
assert(n >= 0 && n <= register_count, "Invalid continuation value");
- if (context == nullptr || n == register_count) {
+ if (n == register_count) {
return;
}
- const ucontext_t *uc = (const ucontext_t*)context;
while (n < register_count) {
// Update continuation with next index before printing location
continuation = n + 1;
+
+ if (n < 16) {
+ // Standard registers (RAX-R15)
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
- switch (n) {
- CASE_PRINT_REG( 0, "RAX=", RAX); break;
- CASE_PRINT_REG( 1, "RBX=", RBX); break;
- CASE_PRINT_REG( 2, "RCX=", RCX); break;
- CASE_PRINT_REG( 3, "RDX=", RDX); break;
- CASE_PRINT_REG( 4, "RSP=", RSP); break;
- CASE_PRINT_REG( 5, "RBP=", RBP); break;
- CASE_PRINT_REG( 6, "RSI=", RSI); break;
- CASE_PRINT_REG( 7, "RDI=", RDI); break;
- CASE_PRINT_REG( 8, "R8 =", R8); break;
- CASE_PRINT_REG( 9, "R9 =", R9); break;
- CASE_PRINT_REG(10, "R10=", R10); break;
- CASE_PRINT_REG(11, "R11=", R11); break;
- CASE_PRINT_REG(12, "R12=", R12); break;
- CASE_PRINT_REG(13, "R13=", R13); break;
- CASE_PRINT_REG(14, "R14=", R14); break;
- CASE_PRINT_REG(15, "R15=", R15); break;
- }
+ switch (n) {
+ CASE_PRINT_REG( 0, "RAX=", RAX); break;
+ CASE_PRINT_REG( 1, "RBX=", RBX); break;
+ CASE_PRINT_REG( 2, "RCX=", RCX); break;
+ CASE_PRINT_REG( 3, "RDX=", RDX); break;
+ CASE_PRINT_REG( 4, "RSP=", RSP); break;
+ CASE_PRINT_REG( 5, "RBP=", RBP); break;
+ CASE_PRINT_REG( 6, "RSI=", RSI); break;
+ CASE_PRINT_REG( 7, "RDI=", RDI); break;
+ CASE_PRINT_REG( 8, "R8 =", R8); break;
+ CASE_PRINT_REG( 9, "R9 =", R9); break;
+ CASE_PRINT_REG(10, "R10=", R10); break;
+ CASE_PRINT_REG(11, "R11=", R11); break;
+ CASE_PRINT_REG(12, "R12=", R12); break;
+ CASE_PRINT_REG(13, "R13=", R13); break;
+ CASE_PRINT_REG(14, "R14=", R14); break;
+ CASE_PRINT_REG(15, "R15=", R15); break;
+ }
# undef CASE_PRINT_REG
+ } else {
+ // APX extended general purpose registers (R16-R31)
+ st->print("R%d=", n);
+ print_location(st, apx->regs[n - 16]);
+ }
++n;
}
}
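For readers unfamiliar with the XSAVE layout that get_apx_state walks: the legacy FXSAVE image occupies the first 512 bytes, the XSAVE header (with the xfeatures bitmap) starts at offset 512, and each extended component sits at an offset the CPU reports through CPUID leaf 0xD. The sketch below shows how that offset could be queried with GCC's <cpuid.h>; the sub-leaf number 19 mirrors XFEATURE_APX above, but treat this as an illustration, not the JDK's VM_Version code:

#include <cpuid.h>
#include <cstdint>
#include <cstdio>

// Query size and offset of one XSAVE state component via CPUID.(EAX=0xD, ECX=component).
static bool xsave_component_info(unsigned component, uint32_t& size, uint32_t& offset) {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid_count(0xD, component, &eax, &ebx, &ecx, &edx)) {
    return false;
  }
  size = eax;    // size in bytes of this component's state
  offset = ebx;  // offset from the start of the (non-compacted) XSAVE area
  return size != 0;
}

int main() {
  uint32_t size = 0, offset = 0;
  const unsigned APX_COMPONENT = 19;  // bit position of XFEATURE_APX
  if (xsave_component_info(APX_COMPONENT, size, offset)) {
    std::printf("APX state: %u bytes at offset %u\n", size, offset);
  } else {
    std::printf("APX state not reported by this CPU\n");
  }
  return 0;
}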
diff --git a/src/hotspot/share/cds/aotGrowableArray.cpp b/src/hotspot/share/cds/aotGrowableArray.cpp
new file mode 100644
index 00000000000..ec63e7aa57f
--- /dev/null
+++ b/src/hotspot/share/cds/aotGrowableArray.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotGrowableArray.hpp"
+#include "cds/aotMetaspace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/growableArray.hpp"
+
+void AOTGrowableArrayHelper::deallocate(void* mem) {
+ if (!AOTMetaspace::in_aot_cache(mem)) {
+ GrowableArrayCHeapAllocator::deallocate(mem);
+ }
+}
diff --git a/src/hotspot/share/cds/aotGrowableArray.hpp b/src/hotspot/share/cds/aotGrowableArray.hpp
new file mode 100644
index 00000000000..0a0c137ed07
--- /dev/null
+++ b/src/hotspot/share/cds/aotGrowableArray.hpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
+#define SHARE_AOT_AOTGROWABLEARRAY_HPP
+
+#include
+#include
+
+class AOTGrowableArrayHelper {
+public:
+ static void deallocate(void* mem);
+};
+
+// An AOTGrowableArray provides the same functionality as a GrowableArray that
+// uses the C heap allocator. In addition, AOTGrowableArray can be iterated with
+// MetaspaceClosure. This type should be used for growable arrays that need to be
+// stored in the AOT cache. See ModuleEntry::_reads for an example.
+template <typename E>
+class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E> > {
+ friend class VMStructs;
+ friend class GrowableArrayWithAllocator<E, AOTGrowableArray<E> >;
+
+ static E* allocate(int max, MemTag mem_tag) {
+ return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
+ }
+
+ E* allocate() {
+ return allocate(this->_capacity, mtClass);
+ }
+
+ void deallocate(E* mem) {
+#if INCLUDE_CDS
+ AOTGrowableArrayHelper::deallocate(mem);
+#else
+ GrowableArrayCHeapAllocator::deallocate(mem);
+#endif
+ }
+
+public:
+ AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
+ GrowableArrayWithAllocator(
+ allocate(initial_capacity, mem_tag),
+ initial_capacity) {}
+
+ AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
+ static bool is_read_only_by_default() { return false; }
+};
+
+#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
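A hedged usage sketch for the new type (ExampleEntry and _items are invented; ModuleEntry::_reads is the real in-tree user): a metadata class holds an AOTGrowableArray field and forwards metaspace_pointers_do to it, so the backing array and the pointers stored in it are followed when the AOT cache is written. This fragment assumes the surrounding HotSpot headers rather than being standalone:

#include "cds/aotGrowableArray.inline.hpp"
#include "memory/metaspaceClosure.hpp"

// Illustrative only: a metadata holder whose element list should be archived.
class ExampleEntry : public MetaspaceObj {
  AOTGrowableArray<Symbol*> _items;  // grows on the C heap, iterable by MetaspaceClosure

public:
  void add_item(Symbol* s) { _items.append(s); }

  void metaspace_pointers_do(MetaspaceClosure* it) {
    // Pushes the backing C-heap array (and the Symbol* entries in it) to the closure.
    _items.metaspace_pointers_do(it);
  }
};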
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
similarity index 62%
rename from src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
rename to src/hotspot/share/cds/aotGrowableArray.inline.hpp
index f6d47acdc01..8c6e8cb6503 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
+++ b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,17 +22,16 @@
*
*/
-#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
-#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
+#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
+#include "cds/aotGrowableArray.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/oopsHierarchy.hpp"
+#include "memory/metaspaceClosure.hpp"
-inline bool G1CMObjArrayProcessor::should_be_sliced(oop obj) {
- return obj->is_objArray() && ((objArrayOop)obj)->size() >= 2 * ObjArrayMarkingStride;
+template <typename E>
+void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push_c_array(AOTGrowableArray::data_addr(), AOTGrowableArray::capacity());
}
-#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index a252eae4b84..5e4e0956824 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
- info._type = ref->msotype();
+ info._type = ref->type();
_objs.append(info);
}
@@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
- info._type = src_info->msotype();
+ info._type = src_info->type();
objs.append(info);
}
@@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
- MetaspaceObj::Type type = info._type;
- const char* type_name = MetaspaceObj::type_name(type);
+ MetaspaceClosureType type = info._type;
+ const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
- case MetaspaceObj::ClassType:
+ case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolType:
+ case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolCacheType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstMethodType:
+ case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodType:
+ case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodCountersType:
+ case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodDataType:
+ case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::SymbolType:
+ case MetaspaceClosureType::ModuleEntryType:
+ log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::PackageEntryType:
+ log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::GrowableArrayType:
+ log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::KlassTrainingDataType:
+ case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodTrainingDataType:
+ case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::CompileTrainingDataType:
+ case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
+void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
+ mod->name_as_C_string());
+}
+
+void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
+ pkg->module()->name_as_C_string(), pkg->name_as_C_string());
+}
+
+void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
+ arr->length(), arr->capacity());
+}
+
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
diff --git a/src/hotspot/share/cds/aotMapLogger.hpp b/src/hotspot/share/cds/aotMapLogger.hpp
index ba188514861..bf7ce0028b9 100644
--- a/src/hotspot/share/cds/aotMapLogger.hpp
+++ b/src/hotspot/share/cds/aotMapLogger.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
+class GrowableArrayBase;
class KlassTrainingData;
+class MethodCounters;
class MethodTrainingData;
+class ModuleEntry;
class outputStream;
+class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
- MetaspaceObj::Type _type;
+ MetaspaceClosureType _type;
};
public:
@@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);
diff --git a/src/hotspot/share/cds/aotMappedHeapLoader.cpp b/src/hotspot/share/cds/aotMappedHeapLoader.cpp
index 84051cbd9e5..210867be70c 100644
--- a/src/hotspot/share/cds/aotMappedHeapLoader.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapLoader.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -360,10 +360,8 @@ bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
- if (CDSConfig::is_dumping_heap()) {
- assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- } else {
- assert(CDSConfig::is_using_archive(), "must be");
+ if (!CDSConfig::is_using_archive()) {
+ assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
@@ -466,7 +464,9 @@ void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
add_root_segment((objArrayOop)segment_oop);
}
- StringTable::load_shared_strings_array();
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ StringTable::move_shared_strings_into_runtime_table();
+ }
}
}
@@ -619,7 +619,7 @@ bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
- HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
+ HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 79d789e0c70..894a35183ca 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::iterate_roots(it);
+ }
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::remove_unshareable_info();
+ }
+
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@@ -1097,7 +1104,12 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
- assert(CDSConfig::allow_only_single_java_thread(), "Required");
+ if (!CDSConfig::is_dumping_preimage_static_archive()) {
+ // A single thread is required for Reference handling and deterministic CDS archive.
+ // It's not required for dumping the preimage, where References won't be archived and
+ // determinism is not needed.
+ assert(CDSConfig::allow_only_single_java_thread(), "Required");
+ }
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
report_loading_error("archivedBootLayer not available, disabling full module graph");
CDSConfig::stop_dumping_full_module_graph();
@@ -1135,6 +1147,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
+ ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
@@ -1154,12 +1167,6 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// Perhaps there is a way to avoid hard-coding these names here.
// See discussion in JDK-8342481.
}
-
- if (HeapShared::is_writing_mapping_mode()) {
- // Do this at the very end, when no Java code will be executed. Otherwise
- // some new strings may be added to the intern table.
- StringTable::allocate_shared_strings_array(CHECK);
- }
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();
diff --git a/src/hotspot/share/cds/aotReferenceObjSupport.cpp b/src/hotspot/share/cds/aotReferenceObjSupport.cpp
index aa7cc875533..0c27c8ce5f0 100644
--- a/src/hotspot/share/cds/aotReferenceObjSupport.cpp
+++ b/src/hotspot/share/cds/aotReferenceObjSupport.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -177,12 +177,17 @@ void AOTReferenceObjSupport::init_keep_alive_objs_table() {
// Returns true IFF obj is an instance of java.lang.ref.Reference. If so, perform extra eligibility checks.
bool AOTReferenceObjSupport::check_if_ref_obj(oop obj) {
- // We have a single Java thread. This means java.lang.ref.Reference$ReferenceHandler thread
- // is not running. Otherwise the checks for next/discovered may not work.
- precond(CDSConfig::allow_only_single_java_thread());
assert_at_safepoint(); // _keep_alive_objs_table uses raw oops
if (obj->klass()->is_subclass_of(vmClasses::Reference_klass())) {
+ // The following check works only if the java.lang.ref.Reference$ReferenceHandler thread
+ // is not running.
+ //
+ // This code is called on every object found by AOTArtifactFinder. When dumping the
+ // preimage archive, AOTArtifactFinder should not find any Reference objects.
+ precond(!CDSConfig::is_dumping_preimage_static_archive());
+ precond(CDSConfig::allow_only_single_java_thread());
+
precond(AOTReferenceObjSupport::is_enabled());
precond(JavaClasses::is_supported_for_archiving(obj));
precond(_keep_alive_objs_table != nullptr);
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 6bbefea5cd9..9161980c4be 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
- } else if (ref->msotype() == MetaspaceObj::SymbolType) {
+ } else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- ClassLoaderDataShared::iterate_symbols(&doit);
- }
-#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
- if (ref->msotype() == MetaspaceObj::MethodType) {
+ if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
- if (ref->msotype() == MetaspaceObj::MethodDataType) {
+ if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
- } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
- ref->msotype() == MetaspaceObj::MethodCountersType ||
- ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
- ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
- ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
+ } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
+ ref->type() == MetaspaceClosureType::MethodCountersType ||
+ ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
+ ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
+ ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
- } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
+ } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@@ -571,7 +566,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
- log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
+ aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
+ return set_to_null;
+ }
+ if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
+ ResourceMark rm;
+ aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}
@@ -615,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
- char* start = rw_region()->top();
- ClassLoaderDataShared::allocate_archived_tables();
- alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
- }
-#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@@ -632,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- char* start = ro_region()->top();
- ClassLoaderDataShared::init_archived_tables();
- alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
- }
-#endif
-
RegeneratedClasses::record_regenerated_objects();
}
@@ -658,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
- if (src_info->msotype() == MetaspaceObj::ClassType) {
+ if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@@ -674,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
- } else if (src_info->msotype() == MetaspaceObj::SymbolType) {
+ } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@@ -684,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
- if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
+ if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@@ -699,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
- intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
+ intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@@ -709,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
- _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
+ _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@@ -992,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
- if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
- info.msotype() == MetaspaceObj::MethodTrainingDataType ||
- info.msotype() == MetaspaceObj::CompileTrainingDataType) {
+ if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
+ info.type() == MetaspaceClosureType::MethodTrainingDataType ||
+ info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodDataType) {
+ } else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
+ } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index 9a628439039..9de6c02edc5 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
- MetaspaceObj::Type _msotype;
+ MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object inside the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
- _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
+ _size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* renegerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(renegerated_obj_info->_follow_mode),
- _size_in_bytes(0), _msotype(renegerated_obj_info->_msotype),
+ _size_in_bytes(0), _type(renegerated_obj_info->_type),
_source_addr(src), _buffered_addr(renegerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
- MetaspaceObj::Type msotype() const { return _msotype; }
+ MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};
diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp
index 5f6b568dd6e..f4ef3c66f7a 100644
--- a/src/hotspot/share/cds/cdsConfig.cpp
+++ b/src/hotspot/share/cds/cdsConfig.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -556,7 +556,9 @@ void CDSConfig::check_aotmode_record() {
// At VM exit, the module graph may be contaminated with program states.
// We will rebuild the module graph when dumping the CDS final image.
- disable_heap_dumping();
+ _is_using_optimized_module_handling = false;
+ _is_using_full_module_graph = false;
+ _is_dumping_full_module_graph = false;
}
void CDSConfig::check_aotmode_create() {
@@ -582,6 +584,7 @@ void CDSConfig::check_aotmode_create() {
substitute_aot_filename(FLAG_MEMBER_ENUM(AOTCache));
_is_dumping_final_static_archive = true;
+ _is_using_full_module_graph = false;
UseSharedSpaces = true;
RequireSharedSpaces = true;
@@ -954,7 +957,9 @@ bool CDSConfig::are_vm_options_incompatible_with_dumping_heap() {
}
bool CDSConfig::is_dumping_heap() {
- if (!(is_dumping_classic_static_archive() || is_dumping_final_static_archive())
+ // Note: when dumping the preimage static archive, only a very limited set of oops
+ // is dumped.
+ if (!is_dumping_static_archive()
|| are_vm_options_incompatible_with_dumping_heap()
|| _disable_heap_dumping) {
return false;
@@ -966,6 +971,26 @@ bool CDSConfig::is_loading_heap() {
return HeapShared::is_archived_heap_in_use();
}
+bool CDSConfig::is_dumping_klass_subgraphs() {
+ if (is_dumping_classic_static_archive() || is_dumping_final_static_archive()) {
+ // KlassSubGraphs (see heapShared.cpp) is a legacy mechanism for archiving oops. It
+ // has been superseded by AOT class linking. This feature is used only when
+ // AOT class linking is disabled.
+ //
+ // KlassSubGraphs are disabled in the preimage static archive, which contains a very
+ // limited set of oops.
+ return is_dumping_heap() && !is_dumping_aot_linked_classes();
+ } else {
+ return false;
+ }
+}
+
+bool CDSConfig::is_using_klass_subgraphs() {
+ return (is_loading_heap() &&
+ !CDSConfig::is_using_aot_linked_classes() &&
+ !CDSConfig::is_dumping_final_static_archive());
+}
+
bool CDSConfig::is_using_full_module_graph() {
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
return true;
diff --git a/src/hotspot/share/cds/cdsConfig.hpp b/src/hotspot/share/cds/cdsConfig.hpp
index 202904e8231..739dbb4937b 100644
--- a/src/hotspot/share/cds/cdsConfig.hpp
+++ b/src/hotspot/share/cds/cdsConfig.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,6 +188,9 @@ public:
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
+ static bool is_dumping_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
+ static bool is_using_klass_subgraphs() NOT_CDS_JAVA_HEAP_RETURN_(false);
+
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);
diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp
index f2862454286..da68fa70761 100644
--- a/src/hotspot/share/cds/cppVtables.cpp
+++ b/src/hotspot/share/cds/cppVtables.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,14 @@
*
*/
+#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
+
+#ifndef PRODUCT
+
+// AOTGrowableArray has a vtable only in non-product builds (due to
+// the virtual printing functions in AnyObj).
+
+using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
+
+#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
+ f(GrowableArray_ModuleEntry_ptr) \
+
+#endif
+
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
- f(CompileTrainingData)
+ f(CompileTrainingData) \
+ NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
class CppVtableInfo {
intptr_t _vtable_size;
@@ -86,7 +102,7 @@ public:
}
};
-static inline intptr_t* vtable_of(const Metadata* m) {
+static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}
@@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
+ ResourceMark rm;
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
-intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
+intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
- switch (msotype) {
- case MetaspaceObj::SymbolType:
- case MetaspaceObj::TypeArrayU1Type:
- case MetaspaceObj::TypeArrayU2Type:
- case MetaspaceObj::TypeArrayU4Type:
- case MetaspaceObj::TypeArrayU8Type:
- case MetaspaceObj::TypeArrayOtherType:
- case MetaspaceObj::ConstMethodType:
- case MetaspaceObj::ConstantPoolCacheType:
- case MetaspaceObj::AnnotationsType:
- case MetaspaceObj::RecordComponentType:
- case MetaspaceObj::AdapterHandlerEntryType:
- case MetaspaceObj::AdapterFingerPrintType:
+ switch (type) {
+ case MetaspaceClosureType::SymbolType:
+ case MetaspaceClosureType::TypeArrayU1Type:
+ case MetaspaceClosureType::TypeArrayU2Type:
+ case MetaspaceClosureType::TypeArrayU4Type:
+ case MetaspaceClosureType::TypeArrayU8Type:
+ case MetaspaceClosureType::TypeArrayOtherType:
+ case MetaspaceClosureType::CArrayType:
+ case MetaspaceClosureType::ConstMethodType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
+ case MetaspaceClosureType::AnnotationsType:
+ case MetaspaceClosureType::ModuleEntryType:
+ case MetaspaceClosureType::PackageEntryType:
+ case MetaspaceClosureType::RecordComponentType:
+ case MetaspaceClosureType::AdapterHandlerEntryType:
+ case MetaspaceClosureType::AdapterFingerPrintType:
+ PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:
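The widened vtable_of(const void*) above relies on a long-standing HotSpot assumption: on supported platforms the compiler stores a polymorphic object's vptr in its first word, so instantiating a dummy object is enough to find the vtable that CppVtableCloner copies from. A minimal standalone sketch of that trick (illustrative names, not part of this patch):

#include <cstdint>

// A stand-in for any of the CPP_VTABLE_TYPES_DO classes: having at least one
// virtual function forces the compiler to emit a vtable and a per-object vptr.
struct AnyPolymorphic {
  virtual ~AnyPolymorphic() = default;
  virtual void print() {}
};

// Same idea as vtable_of(): read the first word of the object as a vtable pointer.
static intptr_t* vtable_of_example(const void* obj) {
  return *(intptr_t* const*)obj;
}

int main() {
  AnyPolymorphic tmp;                      // dummy object, like "T tmp;" in the cloner
  intptr_t* vt = vtable_of_example(&tmp);  // address of the class's vtable slots
  return vt != nullptr ? 0 : 1;
}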
diff --git a/src/hotspot/share/cds/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp
index b40ca036023..9e28ba020ee 100644
--- a/src/hotspot/share/cds/cppVtables.hpp
+++ b/src/hotspot/share/cds/cppVtables.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
- static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
+ static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }
diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp
index 7d651320e6f..4553f0f6a01 100644
--- a/src/hotspot/share/cds/dumpAllocStats.hpp
+++ b/src/hotspot/share/cds/dumpAllocStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,32 +27,34 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
+#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
- // Here's poor man's enum inheritance
-#define SHAREDSPACE_OBJ_TYPES_DO(f) \
- METASPACE_OBJ_TYPES_DO(f) \
+#define DUMPED_OBJ_TYPES_DO(f) \
+ METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
- f(ModulesNatives) \
f(CppVTables) \
f(Other)
+#define DUMPED_TYPE_DECLARE(name) name ## Type,
+#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
- void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
- assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+ void record(MetaspaceClosureType type, int byte_size, bool read_only) {
+ int t = (int)type;
+ assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
- _counts[which][type] ++;
- _bytes [which][type] += byte_size;
- }
-
- void record_modules(int byte_size, bool read_only) {
- int which = (read_only) ? RO : RW;
- _bytes [which][ModulesNativesType] += byte_size;
+ _counts[which][t] ++;
+ _bytes [which][t] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {
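For readers unfamiliar with the pattern, DUMPED_OBJ_TYPES_DO together with DUMPED_TYPE_DECLARE and DUMPED_TYPE_NAME_CASE is a classic X-macro: a single list expands into both the enum and the name-lookup switch, so the two can never drift apart. A self-contained sketch with made-up names (not the real HotSpot macros):

#include <cstdio>

#define MY_TYPES_DO(f) f(Symbol) f(Klass) f(Other)
#define MY_TYPE_DECLARE(name) name##Type,
#define MY_TYPE_NAME_CASE(name) case name##Type: return #name;

enum Type { MY_TYPES_DO(MY_TYPE_DECLARE) _number_of_types };

static const char* type_name(Type t) {
  switch (t) {
    MY_TYPES_DO(MY_TYPE_NAME_CASE)
    default: return "unknown";
  }
}

int main() {
  std::printf("%s\n", type_name(KlassType));  // prints "Klass"
  return 0;
}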
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index fdc335f3799..143f9147853 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,7 +210,7 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
// Legacy CDS archive support (to be deprecated)
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
@@ -413,6 +413,8 @@ void HeapShared::materialize_thread_object() {
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
+ bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
+ assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
@@ -453,7 +455,6 @@ int HeapShared::append_root(oop obj) {
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
- assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
oop result;
@@ -598,8 +599,7 @@ public:
void set_oop(MetaspaceObj* ptr, oop o) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle handle(Universe::vm_global(), o);
- bool is_new = put(ptr, handle);
- assert(is_new, "cannot set twice");
+ put_when_absent(ptr, handle);
}
void remove_oop(MetaspaceObj* ptr) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
@@ -612,6 +612,11 @@ public:
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
+ if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
+ // We are in an AOT training run. The class has been redefined and we are giving it a new resolved_references array.
+ // Ignore it, as this class will be excluded from the AOT config.
+ return;
+ }
if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
_scratch_objects_table->set_oop(src, dest);
}
@@ -831,14 +836,6 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
return nullptr;
}
-void HeapShared::archive_strings() {
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
- oop shared_strings_array = StringTable::init_shared_strings_array();
- bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
- assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
- StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
-}
-
int HeapShared::archive_exception_instance(oop exception) {
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
assert(success, "sanity");
@@ -890,7 +887,7 @@ void HeapShared::start_scanning_for_oops() {
void HeapShared::end_scanning_for_oops() {
if (is_writing_mapping_mode()) {
- archive_strings();
+ StringTable::init_shared_table();
}
delete_seen_objects_table();
}
@@ -940,7 +937,7 @@ void HeapShared::scan_java_class(Klass* orig_k) {
void HeapShared::archive_subgraphs() {
assert(CDSConfig::is_dumping_heap(), "must be");
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (CDSConfig::is_dumping_full_module_graph()) {
@@ -948,10 +945,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
-
- if (CDSConfig::is_dumping_full_module_graph()) {
- Modules::verify_archived_modules();
- }
}
//
@@ -1302,10 +1295,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
- if (!is_archived_heap_in_use()) {
- return; // nothing to do
- }
- if (!CDSConfig::is_using_aot_linked_classes()) {
+ if (CDSConfig::is_using_klass_subgraphs()) {
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
@@ -1395,7 +1385,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
- if (!is_archived_heap_in_use()) {
+ if (!CDSConfig::is_using_klass_subgraphs()) {
return; // nothing to do
}
@@ -1871,7 +1861,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name) {
- assert(CDSConfig::is_dumping_heap(), "dump time only");
+ precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@@ -1922,7 +1912,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
};
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
- assert(CDSConfig::is_dumping_heap(), "dump time only");
+ precond(CDSConfig::is_dumping_klass_subgraphs());
assert(k->defined_by_boot_loader(), "must be boot class");
oop m = k->java_mirror();
@@ -2148,7 +2138,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(CDSConfig::is_dumping_heap(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
- if (!CDSConfig::is_dumping_aot_linked_classes()) {
+ if (CDSConfig::is_dumping_klass_subgraphs()) {
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (CDSConfig::is_dumping_full_module_graph()) {
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
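The set_oop() change above replaces put() plus a "cannot set twice" assert with put_when_absent(). Assuming that helper follows the usual naming convention (keep the first mapping, ignore later ones), the new behavior can be sketched with a standard map; the names below are illustrative, not HotSpot's scratch-objects table:

#include <map>
#include <string>

// emplace() only inserts when the key is absent, so registering the same object a
// second time becomes a no-op instead of tripping an assert.
template <typename K, typename V>
static bool put_when_absent_example(std::map<K, V>& table, const K& key, const V& value) {
  return table.emplace(key, value).second;   // true only if the key was new
}

int main() {
  std::map<std::string, int> table;
  put_when_absent_example(table, std::string("mirror"), 1);
  put_when_absent_example(table, std::string("mirror"), 2);  // ignored, first value kept
  return table["mirror"] == 1 ? 0 : 1;
}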
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 118c60faa60..3c7068e96ab 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -478,7 +478,6 @@ private:
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
- static void archive_strings();
static void archive_subgraphs();
static void copy_java_mirror(oop orig_mirror, oop scratch_m);
diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp
index 19e05784f4d..e0c818f02fc 100644
--- a/src/hotspot/share/ci/ciField.cpp
+++ b/src/hotspot/share/ci/ciField.cpp
@@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
+ if (holder->trust_final_fields()) {
+ // Explicit opt-in from system classes
+ return true;
+ }
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
- // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
- // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
- if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
- return true;
- }
return TrustFinalNonStaticFields;
}
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 64b9acf9146..33bcabc4566 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
+ _trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index a1b2d8dd12d..8ccf1fadfb7 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
+ bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@@ -207,6 +208,10 @@ public:
return _is_record;
}
+ bool trust_final_fields() const {
+ return _trust_final_fields;
+ }
+
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
diff --git a/src/hotspot/share/ci/ciUtilities.cpp b/src/hotspot/share/ci/ciUtilities.cpp
index 0c5b4d9824f..011fa049275 100644
--- a/src/hotspot/share/ci/ciUtilities.cpp
+++ b/src/hotspot/share/ci/ciUtilities.cpp
@@ -42,10 +42,7 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
-CardTable::CardValue* ci_card_table_address() {
- BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet*>(bs);
- CardTable* ct = ctbs->card_table();
- assert(!UseShenandoahGC, "Shenandoah byte_map_base is not constant.");
- return ct->byte_map_base();
+CardTable::CardValue* ci_card_table_address_const() {
+ CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
+ return ctbs->card_table_base_const();
}
diff --git a/src/hotspot/share/ci/ciUtilities.hpp b/src/hotspot/share/ci/ciUtilities.hpp
index 75dbb03adf4..3333972d57d 100644
--- a/src/hotspot/share/ci/ciUtilities.hpp
+++ b/src/hotspot/share/ci/ciUtilities.hpp
@@ -51,9 +51,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
-CardTable::CardValue* ci_card_table_address();
+CardTable::CardValue* ci_card_table_address_const();
template <typename T> T ci_card_table_address_as() {
- return reinterpret_cast<T>(ci_card_table_address());
+ return reinterpret_cast<T>(ci_card_table_address_const());
}
#endif // SHARE_CI_CIUTILITIES_HPP
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index c9d9d3632b5..817d0c64d11 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
+ _jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
+ case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
+ if (_location != _in_class) break; // only allow for classes
+ if (!privileged) break; // only allow in privileged code
+ return _jdk_internal_vm_annotation_TrustFinalFields;
+ }
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
+ if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
+ ik->set_trust_final_fields(true);
+ }
}
#define MAX_ARGS_SIZE 255
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index d9a63cd154b..f631bfaa102 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -1418,6 +1418,10 @@ char* ClassLoader::lookup_vm_options() {
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
JImage_file =(*JImageOpen)(modules_path, &error);
if (JImage_file == nullptr) {
+ if (Arguments::has_jimage()) {
+ // The modules file exists but is unreadable or corrupt
+ vm_exit_during_initialization(err_msg("Unable to load %s", modules_path));
+ }
return nullptr;
}
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.cpp b/src/hotspot/share/classfile/classLoaderDataShared.cpp
index 7a7743edd03..d415fe64bac 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
- void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
- void allocate(ClassLoaderData* loader_data);
- void init_archived_entries(ClassLoaderData* loader_data);
+ void iterate_roots(MetaspaceClosure* closure);
+ void build_tables(ClassLoaderData* loader_data, TRAPS);
+ void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
-void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
+void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->iterate_symbols(closure);
- loader_data->modules() ->iterate_symbols(closure);
- loader_data->unnamed_module()->iterate_symbols(closure);
- }
+ it->push(&_packages);
+ it->push(&_modules);
+ it->push(&_unnamed_module);
}
-void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
+void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
- _packages = loader_data->packages()->allocate_archived_entries();
- _modules = loader_data->modules() ->allocate_archived_entries();
- _unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
+ _packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
+ _modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
+ _unnamed_module = loader_data->unnamed_module();
}
}
-void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
- assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->init_archived_entries(_packages);
- loader_data->modules() ->init_archived_entries(_modules);
- _unnamed_module->init_as_archived_entry();
+void ArchivedClassLoaderData::remove_unshareable_info() {
+ if (_packages != nullptr) {
+ _packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
+ for (int i = 0; i < _packages->length(); i++) {
+ _packages->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_modules != nullptr) {
+ _modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
+ for (int i = 0; i < _modules->length(); i++) {
+ _modules->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_unnamed_module != nullptr) {
+ _unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
+ _unnamed_module->remove_unshareable_info();
}
}
@@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
-#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
-#endif
}
static ClassLoaderData* null_class_loader_data() {
@@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
-void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
+void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
- _archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
- _archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
+ _archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
+ _archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
+ _archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}
-void ClassLoaderDataShared::allocate_archived_tables() {
+void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.allocate (null_class_loader_data());
- _archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
- _archived_system_loader_data.allocate (java_system_loader_data_or_null());
+ _archived_boot_loader_data.iterate_roots(it);
+ _archived_platform_loader_data.iterate_roots(it);
+ _archived_system_loader_data.iterate_roots(it);
}
-void ClassLoaderDataShared::init_archived_tables() {
+void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
+ _archived_boot_loader_data.remove_unshareable_info();
+ _archived_platform_loader_data.remove_unshareable_info();
+ _archived_system_loader_data.remove_unshareable_info();
- _archived_boot_loader_data.init_archived_entries (null_class_loader_data());
- _archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
- _archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
-
- _archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
+ _archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
-
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();
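The comment kept above explains why build_tables() produces Arrays rather than archiving the hashtables directly: bucket order depends on Symbol addresses, which move under ASLR, so the dump stores a deterministically sorted flat array and the runtime rebuilds the lookup table from it. A small sketch of that dump/restore shape with generic containers (illustrative only, not HotSpot types):

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

struct Entry { std::string name; };

// Dump time: flatten the table into an array with a deterministic, name-based order.
static std::vector<Entry*> build_aot_table_example(std::unordered_map<std::string, Entry*>& table) {
  std::vector<Entry*> flat;
  for (auto& kv : table) flat.push_back(kv.second);
  std::sort(flat.begin(), flat.end(),
            [](Entry* a, Entry* b) { return a->name < b->name; });
  return flat;
}

// Run time: rebuild the hash table from the archived array, whatever the new addresses are.
static std::unordered_map<std::string, Entry*> load_archived_entries_example(const std::vector<Entry*>& flat) {
  std::unordered_map<std::string, Entry*> table;
  for (Entry* e : flat) table.emplace(e->name, e);
  return table;
}

int main() {
  Entry a{"java.base"}, b{"java.logging"};
  std::unordered_map<std::string, Entry*> live{{a.name, &a}, {b.name, &b}};
  auto archived = build_aot_table_example(live);            // deterministic order for the archive
  auto restored = load_archived_entries_example(archived);  // rebuilt at runtime
  return restored.size() == 2 ? 0 : 1;
}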
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.hpp b/src/hotspot/share/classfile/classLoaderDataShared.hpp
index 39d0a89418f..2cf37310e50 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.hpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
+ static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+ static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
+ static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
- static void allocate_archived_tables();
- static void iterate_symbols(MetaspaceClosure* closure);
- static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index dd70d7b49ab..b650bf8cfb8 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -1263,6 +1263,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
"Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
}
+ if (CDSConfig::is_dumping_heap()) {
+ create_scratch_mirror(k, CHECK_(false));
+ }
+
return true;
}
#endif // INCLUDE_CDS_JAVA_HEAP
diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp
index 5fb3d6f2d13..b5b8aa4ef55 100644
--- a/src/hotspot/share/classfile/moduleEntry.cpp
+++ b/src/hotspot/share/classfile/moduleEntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassLocation.hpp"
+#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@@ -37,6 +38,7 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@@ -44,7 +46,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
- GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
+ AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}
@@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
- _is_patched(false)
- DEBUG_ONLY(COMMA _reads_is_archived(false)) {
+ _is_patched(false) {
// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");
-
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}
+void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push(&_name);
+ it->push(&_reads);
+ it->push(&_version);
+ it->push(&_location);
+}
+
#if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
- const ModuleEntry*,
- ModuleEntry*,
- 557, // prime number
- AnyObj::C_HEAP> ArchivedModuleEntries;
-static ArchivedModuleEntries* _archive_modules_entries = nullptr;
-
-#ifndef PRODUCT
-static int _num_archived_module_entries = 0;
-static int _num_inited_module_entries = 0;
-#endif
-
bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}
-ModuleEntry* ModuleEntry::allocate_archived_entry() const {
- precond(should_be_archived());
- precond(CDSConfig::is_dumping_full_module_graph());
- ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
- memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
+void ModuleEntry::remove_unshareable_info() {
+ _archived_module_index = HeapShared::append_root(module_oop());
- archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
- if (_archive_modules_entries == nullptr) {
- _archive_modules_entries = new (mtClass)ArchivedModuleEntries();
- }
- assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
- _archive_modules_entries->put(this, archived_entry);
- DEBUG_ONLY(_num_archived_module_entries++);
-
- if (CDSConfig::is_dumping_final_static_archive()) {
- OopHandle null_handle;
- archived_entry->_shared_pd = null_handle;
- } else {
- assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+ if (_reads != nullptr) {
+ _reads->set_in_aot_cache();
}
// Clear handles and restore at run time. Handles cannot be archived.
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ OopHandle null_handle;
+ _shared_pd = null_handle;
+ } else {
+ assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+ }
+
OopHandle null_handle;
- archived_entry->_module_handle = null_handle;
-
- // For verify_archived_module_entries()
- DEBUG_ONLY(_num_inited_module_entries++);
-
- if (log_is_enabled(Info, aot, module)) {
- ResourceMark rm;
- LogStream ls(Log(aot, module)::info());
- ls.print("Stored in archive: ");
- archived_entry->print(&ls);
- }
- return archived_entry;
-}
-
-bool ModuleEntry::has_been_archived() {
- assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
- return _archive_modules_entries->contains(this);
-}
-
-ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
- ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
- assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
- return *ptr;
-}
-
-// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
-// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
-// Write it out as an Array, and convert it back to GrowableArray at runtime.
-Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
- Array<ModuleEntry*>* archived_array = nullptr;
- int length = (array == nullptr) ? 0 : array->length();
- if (length > 0) {
- archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
- for (int i = 0; i < length; i++) {
- ModuleEntry* archived_entry = get_archived_entry(array->at(i));
- archived_array->at_put(i, archived_entry);
- ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
- }
- }
-
- return archived_array;
-}
-
-GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
- GrowableArray<ModuleEntry*>* array = nullptr;
- int length = (archived_array == nullptr) ? 0 : archived_array->length();
- if (length > 0) {
- array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
- for (int i = 0; i < length; i++) {
- ModuleEntry* archived_entry = archived_array->at(i);
- array->append(archived_entry);
- }
- }
-
- return array;
-}
-
-void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
- closure->push(&_name);
- closure->push(&_version);
- closure->push(&_location);
-}
-
-void ModuleEntry::init_as_archived_entry() {
- set_archived_reads(write_growable_array(reads()));
+ _module_handle = null_handle;
_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
- _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
- _name = ArchiveBuilder::get_buffered_symbol(_name);
- ArchivePtrMarker::mark_pointer((address*)&_name);
+ Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
+ _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
- if (_version != nullptr) {
- _version = ArchiveBuilder::get_buffered_symbol(_version);
- }
- if (_location != nullptr) {
- _location = ArchiveBuilder::get_buffered_symbol(_location);
- }
JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
- ArchivePtrMarker::mark_pointer((address*)&_reads);
- ArchivePtrMarker::mark_pointer((address*)&_version);
- ArchivePtrMarker::mark_pointer((address*)&_location);
}
-#ifndef PRODUCT
-void ModuleEntry::verify_archived_module_entries() {
- assert(_num_archived_module_entries == _num_inited_module_entries,
- "%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
- _num_archived_module_entries, _num_inited_module_entries);
-}
-#endif // PRODUCT
-
void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
- set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}
@@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}
-void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
- auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
- m->iterate_symbols(closure);
- };
- _table.iterate_all(syms);
-}
-
-Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
- Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
+Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
+ Array<ModuleEntry*>* aot_table =
+ MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
- archived_modules->at_put(n++, m);
+ m->pack_reads();
+ aot_table->at_put(n++, m);
+ if (log_is_enabled(Info, aot, module)) {
+ ResourceMark rm;
+ LogStream ls(Log(aot, module)::info());
+ ls.print("Stored in archive: ");
+ m->print(&ls);
+ }
};
_table.iterate_all(grab);
if (n > 1) {
// Always allocate in the same order to produce deterministic archive.
- QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
+ QuickSort::sort(aot_table->data(), n, compare_module_by_name);
}
- for (int i = 0; i < n; i++) {
- archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
- ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
- }
- return archived_modules;
-}
-void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
- assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
- for (int i = 0; i < archived_modules->length(); i++) {
- ModuleEntry* archived_entry = archived_modules->at(i);
- archived_entry->init_as_archived_entry();
- }
+ return aot_table;
}
void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,
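The new metaspace_pointers_do() is what lets ModuleEntry participate in the generic MetaspaceClosure machinery that replaces the bespoke iterate_symbols/allocate/init sequence. Roughly, the contract looks like this sketch (simplified stand-in types, not HotSpot's real closure):

#include <cstdio>

// Simplified visitor: the archiver hands this to each object, and the object pushes
// the address of every embedded metaspace pointer so the slot can be relocated.
struct ClosureSketch {
  virtual void push_impl(void** slot) = 0;
  template <typename T> void push(T** slot) { push_impl(reinterpret_cast<void**>(slot)); }
  virtual ~ClosureSketch() = default;
};

struct SymbolSketch;  // opaque stand-ins, only used through pointers
struct ReadsSketch;

struct ModuleEntrySketch {
  SymbolSketch* _name = nullptr;
  ReadsSketch*  _reads = nullptr;
  SymbolSketch* _version = nullptr;
  SymbolSketch* _location = nullptr;

  void metaspace_pointers_do(ClosureSketch* it) {
    it->push(&_name);     // one call per pointer field, mirroring the real method
    it->push(&_reads);
    it->push(&_version);
    it->push(&_location);
  }
};

struct CountingClosure : ClosureSketch {
  int count = 0;
  void push_impl(void**) override { ++count; }
};

int main() {
  ModuleEntrySketch m;
  CountingClosure cc;
  m.metaspace_pointers_do(&cc);
  std::printf("%d pointer fields visited\n", cc.count);  // prints 4
  return 0;
}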
diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp
index 2e1852c5369..1a0251a2c2a 100644
--- a/src/hotspot/share/classfile/moduleEntry.hpp
+++ b/src/hotspot/share/classfile/moduleEntry.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP
+#include "cds/aotGrowableArray.hpp"
#include "jni.h"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@@ -68,11 +70,8 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
+ AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
- union {
- GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
- Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
- };
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@@ -81,7 +80,6 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
- DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@@ -120,22 +118,18 @@ public:
bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
- GrowableArray<ModuleEntry*>* reads() const {
- assert(!_reads_is_archived, "sanity");
+ AOTGrowableArray<ModuleEntry*>* reads() const {
return _reads;
}
- void set_reads(GrowableArray<ModuleEntry*>* r) {
+ void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
_reads = r;
- DEBUG_ONLY(_reads_is_archived = false);
}
- Array<ModuleEntry*>* archived_reads() const {
- assert(_reads_is_archived, "sanity");
- return _archived_reads;
- }
- void set_archived_reads(Array<ModuleEntry*>* r) {
- _archived_reads = r;
- DEBUG_ONLY(_reads_is_archived = true);
+ void pack_reads() {
+ if (_reads != nullptr) {
+ _reads->shrink_to_fit();
+ }
}
+
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
@@ -189,6 +183,13 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
+ static bool is_read_only_by_default() { return false; }
+
void print(outputStream* st = tty) const;
void verify();
@@ -198,18 +199,11 @@ public:
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
- void iterate_symbols(MetaspaceClosure* closure);
- ModuleEntry* allocate_archived_entry() const;
- void init_as_archived_entry();
- static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
- bool has_been_archived();
- static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
- static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
+ void remove_unshareable_info();
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
- static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};
@@ -275,9 +269,7 @@ public:
void verify();
#if INCLUDE_CDS_JAVA_HEAP
- void iterate_symbols(MetaspaceClosure* closure);
- Array<ModuleEntry*>* allocate_archived_entries();
- void init_archived_entries(Array<ModuleEntry*>* archived_modules);
+ Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index baf2acfb78c..51d09d9c47f 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");
- if (orig_module_ent->name() != nullptr) {
- // For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
- assert(orig_module_ent->has_been_archived(), "sanity");
- } else {
+ precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
+ if (orig_module_ent->name() == nullptr) {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
- precond(orig_module_ent->has_been_archived());
if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@@ -529,10 +526,6 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}
-void Modules::verify_archived_modules() {
- ModuleEntry::verify_archived_module_entries();
-}
-
class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;
diff --git a/src/hotspot/share/classfile/modules.hpp b/src/hotspot/share/classfile/modules.hpp
index 27a22c1017a..75857c8960c 100644
--- a/src/hotspot/share/classfile/modules.hpp
+++ b/src/hotspot/share/classfile/modules.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
- static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp
index ea2e6cd1def..3e61f2e3a3e 100644
--- a/src/hotspot/share/classfile/packageEntry.cpp
+++ b/src/hotspot/share/classfile/packageEntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,8 @@
*
*/
+#include "cds/aotGrowableArray.inline.hpp"
+#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@@ -31,13 +33,13 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -51,7 +53,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
_qualified_exports(nullptr),
_defined_by_cds_in_class_path(0)
{
- // name can't be null
+ // name can't be null -- a class in the default package gets a null PackageEntry instead.
_name->increment_refcount();
JFR_ONLY(INIT_ID(this);)
@@ -81,7 +83,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
- _qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
+ _qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
}
// Determine, based on this newly established export to module m,
@@ -183,12 +185,24 @@ void PackageEntry::purge_qualified_exports() {
}
void PackageEntry::delete_qualified_exports() {
- if (_qualified_exports != nullptr) {
+ if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
delete _qualified_exports;
}
_qualified_exports = nullptr;
}
+void PackageEntry::pack_qualified_exports() {
+ if (_qualified_exports != nullptr) {
+ _qualified_exports->shrink_to_fit();
+ }
+}
+
+void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push(&_name);
+ it->push(&_module);
+ it->push(&_qualified_exports);
+}
+
PackageEntryTable::PackageEntryTable() { }
PackageEntryTable::~PackageEntryTable() {
@@ -212,66 +226,19 @@ PackageEntryTable::~PackageEntryTable() {
}
#if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
- const PackageEntry*,
- PackageEntry*,
- 557, // prime number
- AnyObj::C_HEAP> ArchivedPackageEntries;
-static ArchivedPackageEntries* _archived_packages_entries = nullptr;
-
bool PackageEntry::should_be_archived() const {
return module()->should_be_archived();
}
-PackageEntry* PackageEntry::allocate_archived_entry() const {
- precond(should_be_archived());
- PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
- memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
-
- if (_archived_packages_entries == nullptr) {
- _archived_packages_entries = new (mtClass)ArchivedPackageEntries();
+void PackageEntry::remove_unshareable_info() {
+ if (_qualified_exports != nullptr) {
+ _qualified_exports->set_in_aot_cache();
}
- assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
- _archived_packages_entries->put(this, archived_entry);
-
- return archived_entry;
-}
-
-PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
- PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
- if (ptr != nullptr) {
- return *ptr;
- } else {
- return nullptr;
- }
-}
-
-void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
- closure->push(&_name);
-}
-
-void PackageEntry::init_as_archived_entry() {
-  Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
-
- _name = ArchiveBuilder::get_buffered_symbol(_name);
- _module = ModuleEntry::get_archived_entry(_module);
-  _qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
_defined_by_cds_in_class_path = 0;
JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
- ArchivePtrMarker::mark_pointer((address*)&_name);
- ArchivePtrMarker::mark_pointer((address*)&_module);
- ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
-
- LogStreamHandle(Info, aot, package) st;
- if (st.is_enabled()) {
- st.print("archived ");
- print(&st);
- }
}
void PackageEntry::load_from_archive() {
-  _qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
JFR_ONLY(INIT_ID(this);)
}
@@ -280,14 +247,7 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
return a->name()->fast_compare(b->name());
}
-void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
- auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
- p->iterate_symbols(closure);
- };
- _table.iterate_all(syms);
-}
-
-Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
+Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
// First count the packages in named modules
int n = 0;
auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
@@ -297,12 +257,19 @@ Array* PackageEntryTable::allocate_archived_entries() {
};
_table.iterate_all(count);
-  Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
+  Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
// reset n
n = 0;
auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
if (p->should_be_archived()) {
+ p->pack_qualified_exports();
archived_packages->at_put(n++, p);
+
+ LogStreamHandle(Info, aot, package) st;
+ if (st.is_enabled()) {
+ st.print("archived ");
+ p->print(&st);
+ }
}
};
_table.iterate_all(grab);
@@ -311,18 +278,8 @@ Array* PackageEntryTable::allocate_archived_entries() {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
}
- for (int i = 0; i < n; i++) {
- archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
- ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
- }
- return archived_packages;
-}
-void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
- for (int i = 0; i < archived_packages->length(); i++) {
- PackageEntry* archived_entry = archived_packages->at(i);
- archived_entry->init_as_archived_entry();
- }
+ return archived_packages;
}
 void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {
diff --git a/src/hotspot/share/classfile/packageEntry.hpp b/src/hotspot/share/classfile/packageEntry.hpp
index 6abf89dc60f..7b174a92287 100644
--- a/src/hotspot/share/classfile/packageEntry.hpp
+++ b/src/hotspot/share/classfile/packageEntry.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
#define SHARE_CLASSFILE_PACKAGEENTRY_HPP
+#include "cds/aotGrowableArray.hpp"
#include "classfile/moduleEntry.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
#include "runtime/atomicAccess.hpp"
@@ -114,7 +116,7 @@ private:
bool _must_walk_exports;
// Contains list of modules this package is qualifiedly exported to. Access
// to this list is protected by the Module_lock.
-  GrowableArray<ModuleEntry*>* _qualified_exports;
+  AOTGrowableArray<ModuleEntry*>* _qualified_exports;
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
// Initial size of a package entry's list of qualified exports.
@@ -205,14 +207,24 @@ public:
void purge_qualified_exports();
void delete_qualified_exports();
+ void pack_qualified_exports(); // used by AOT
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
+ static bool is_read_only_by_default() { return false; }
+
void print(outputStream* st = tty);
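+  // A PackageEntry always has a non-null name (asserted below before conversion).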
+ char* name_as_C_string() const {
+ assert(_name != nullptr, "name can't be null");
+ return name()->as_C_string();
+ }
+
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
- void iterate_symbols(MetaspaceClosure* closure);
- PackageEntry* allocate_archived_entry() const;
- void init_as_archived_entry();
- static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
+ void remove_unshareable_info();
void load_from_archive();
#endif
@@ -271,9 +283,7 @@ public:
void print(outputStream* st = tty);
#if INCLUDE_CDS_JAVA_HEAP
- void iterate_symbols(MetaspaceClosure* closure);
-  Array<PackageEntry*>* allocate_archived_entries();
-  void init_archived_entries(Array<PackageEntry*>* archived_packages);
+  Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
   void load_archived_entries(Array<PackageEntry*>* archived_packages);
#endif
};
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index c775014cfac..2b8b7780a41 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,24 +74,9 @@ const size_t REHASH_LEN = 100;
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;
#if INCLUDE_CDS_JAVA_HEAP
-bool StringTable::_is_two_dimensional_shared_strings_array = false;
-OopHandle StringTable::_shared_strings_array;
-int StringTable::_shared_strings_array_root_index;
-
inline oop StringTable::read_string_from_compact_hashtable(address base_address, u4 index) {
assert(AOTMappedHeapLoader::is_in_use(), "sanity");
- objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
- oop s;
-
- if (!_is_two_dimensional_shared_strings_array) {
- s = array->obj_at((int)index);
- } else {
- int primary_index = index >> _secondary_array_index_bits;
- int secondary_index = index & _secondary_array_index_mask;
- objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
- s = secondary->obj_at(secondary_index);
- }
-
+ oop s = HeapShared::get_root((int)index, false);
assert(java_lang_String::is_instance(s), "must be");
return s;
}
@@ -115,7 +100,6 @@ OopStorage* StringTable::_oop_storage;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
-DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);
volatile bool _alt_hash = false;
@@ -317,12 +301,6 @@ void StringTable::create_table() {
_oop_storage->register_num_dead_callback(&gc_notification);
}
-#if INCLUDE_CDS_JAVA_HEAP
-void StringTable::load_shared_strings_array() {
- _shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
-}
-#endif
-
void StringTable::item_added() {
AtomicAccess::inc(&_items_count);
}
@@ -509,9 +487,6 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
- assert(!AtomicAccess::load_acquire(&_disable_interning_during_cds_dump),
- "All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
-
// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@@ -614,6 +589,10 @@ struct StringTableDeleteCheck : StackObj {
};
void StringTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take SuspendMark first to keep lock order and avoid deadlock.
+ NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -621,7 +600,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {
@@ -954,118 +932,13 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}
-// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
-// be used by the shared strings table.
-void StringTable::allocate_shared_strings_array(TRAPS) {
- if (!CDSConfig::is_dumping_heap()) {
- return;
- }
+void StringTable::init_shared_table() {
+ assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
+ precond(CDSConfig::is_dumping_heap());
+ assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
-
- CompileBroker::wait_for_no_active_tasks();
-
- precond(CDSConfig::allow_only_single_java_thread());
-
- // At this point, no more strings will be added:
- // - There's only a single Java thread (this thread). It no longer executes Java bytecodes
- // so JIT compilation will eventually stop.
- // - CompileBroker has no more active tasks, so all JIT requests have been processed.
-
- // This flag will be cleared after intern table dumping has completed, so we can run the
- // compiler again (for future AOT method compilation, etc).
- DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, true));
-
- if (items_count_acquire() > (size_t)max_jint) {
- fatal("Too many strings to be archived: %zu", items_count_acquire());
- }
-
- int total = (int)items_count_acquire();
- size_t single_array_size = objArrayOopDesc::object_size(total);
-
- log_info(aot)("allocated string table for %d strings", total);
-
- if (!HeapShared::is_too_large_to_archive(single_array_size)) {
- // The entire table can fit in a single array
- objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), total, CHECK);
- _shared_strings_array = OopHandle(Universe::vm_global(), array);
- log_info(aot)("string table array (single level) length = %d", total);
- } else {
- // Split the table in two levels of arrays.
- int primary_array_length = (total + _secondary_array_max_length - 1) / _secondary_array_max_length;
- size_t primary_array_size = objArrayOopDesc::object_size(primary_array_length);
- size_t secondary_array_size = objArrayOopDesc::object_size(_secondary_array_max_length);
-
- if (HeapShared::is_too_large_to_archive(secondary_array_size)) {
- // This can only happen if you have an extremely large number of classes that
- // refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
- // but bail out for safety.
- log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
- AOTMetaspace::unrecoverable_writing_error();
- }
-
- objArrayOop primary = oopFactory::new_objArray(vmClasses::Object_klass(), primary_array_length, CHECK);
- objArrayHandle primaryHandle(THREAD, primary);
- _shared_strings_array = OopHandle(Universe::vm_global(), primary);
-
- log_info(aot)("string table array (primary) length = %d", primary_array_length);
- for (int i = 0; i < primary_array_length; i++) {
- int len;
- if (total > _secondary_array_max_length) {
- len = _secondary_array_max_length;
- } else {
- len = total;
- }
- total -= len;
-
- objArrayOop secondary = oopFactory::new_objArray(vmClasses::Object_klass(), len, CHECK);
- primaryHandle()->obj_at_put(i, secondary);
-
- log_info(aot)("string table array (secondary)[%d] length = %d", i, len);
- assert(!HeapShared::is_too_large_to_archive(secondary), "sanity");
- }
-
- assert(total == 0, "must be");
- _is_two_dimensional_shared_strings_array = true;
- }
-}
-
-#ifndef PRODUCT
-void StringTable::verify_secondary_array_index_bits() {
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
- int max;
- for (max = 1; ; max++) {
- size_t next_size = objArrayOopDesc::object_size(1 << (max + 1));
- if (HeapShared::is_too_large_to_archive(next_size)) {
- break;
- }
- }
- // Currently max is 17 for +UseCompressedOops, 16 for -UseCompressedOops.
- // When we add support for Shenandoah (which has a smaller mininum region size than G1),
- // max will become 15/14.
- //
- // We use _secondary_array_index_bits==14 as that will be the eventual value, and will
- // make testing easier.
- assert(_secondary_array_index_bits <= max,
- "_secondary_array_index_bits (%d) must be smaller than max possible value (%d)",
- _secondary_array_index_bits, max);
-}
-#endif // PRODUCT
-
-// This is called AFTER we enter the CDS safepoint.
-//
-// For each shared string:
-// [1] Store it into _shared_strings_array. Encode its position as a 32-bit index.
-// [2] Store the index and hashcode into _shared_table.
-oop StringTable::init_shared_strings_array() {
- assert(CDSConfig::is_dumping_heap(), "must be");
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
- objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
-
- verify_secondary_array_index_bits();
-
- int index = 0;
- auto copy_into_array = [&] (WeakHandle* val) {
+ int n = 0;
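+  // Register every live interned string (unless it is too large to archive) as a
+  // dumped interned string so that it is copied into the AOT heap.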
+ auto copy_into_aot_heap = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
// If string is too large, don't put it into the string table.
@@ -1074,53 +947,34 @@ oop StringTable::init_shared_strings_array() {
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
- if (!_is_two_dimensional_shared_strings_array) {
- assert(index < array->length(), "no strings should have been added");
- array->obj_at_put(index, string);
- } else {
- int primary_index = index >> _secondary_array_index_bits;
- int secondary_index = index & _secondary_array_index_mask;
-
- assert(primary_index < array->length(), "no strings should have been added");
- objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
-
- assert(secondary != nullptr && secondary->is_objArray(), "must be");
- assert(secondary_index < secondary->length(), "no strings should have been added");
- secondary->obj_at_put(secondary_index, string);
- }
- index ++;
}
+ n++;
return true;
};
- _local_table->do_safepoint_scan(copy_into_array);
- log_info(aot)("Archived %d interned strings", index);
- return array;
+ _local_table->do_safepoint_scan(copy_into_aot_heap);
+ log_info(aot)("Archived %d interned strings", n);
};
void StringTable::write_shared_table() {
+ assert(SafepointSynchronize::is_at_safepoint(), "inside AOT safepoint");
+ precond(CDSConfig::is_dumping_heap());
+ assert(HeapShared::is_writing_mapping_mode(), "not used for streamed oops");
+
_shared_table.reset();
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
- int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
unsigned int hash = java_lang_String::hash_code(string);
- writer.add(hash, index);
- index ++;
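+      // The table now records each string's AOT heap root index (from append_root)
+      // instead of a position in a dedicated shared-strings array.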
+ int root_id = HeapShared::append_root(string);
+ writer.add(hash, root_id);
}
return true;
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
-
- DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, false));
-}
-
-void StringTable::set_shared_strings_array_index(int root_index) {
- assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
- _shared_strings_array_root_index = root_index;
}
void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
@@ -1132,8 +986,27 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
} else if (!AOTMappedHeapLoader::is_in_use()) {
_shared_table.reset();
}
+}
- soc->do_bool(&_is_two_dimensional_shared_strings_array);
- soc->do_int(&_shared_strings_array_root_index);
+void StringTable::move_shared_strings_into_runtime_table() {
+ precond(CDSConfig::is_dumping_final_static_archive());
+ JavaThread* THREAD = JavaThread::current();
+ HandleMark hm(THREAD);
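+  // Walk the mapped shared table and intern each of its strings into the regular
+  // runtime table, then reset the shared table.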
+
+ int n = 0;
+ _shared_table.iterate_all([&](oop string) {
+ int length = java_lang_String::length(string);
+    Handle h_string(THREAD, string);
+ StringWrapper name(h_string, length);
+ unsigned int hash = hash_wrapped_string(name);
+
+ assert(!_alt_hash, "too early");
+ oop interned = do_intern(name, hash, THREAD);
+ assert(string == interned, "must be");
+ n++;
+ });
+
+ _shared_table.reset();
+ log_info(aot)("Moved %d interned strings to runtime table", n);
}
#endif //INCLUDE_CDS_JAVA_HEAP
diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp
index 839e9d9053d..0024a45a2f2 100644
--- a/src/hotspot/share/classfile/stringTable.hpp
+++ b/src/hotspot/share/classfile/stringTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,49 +109,17 @@ public:
static bool needs_rehashing() { return _needs_rehashing; }
static inline void update_needs_rehash(bool rehash);
- // Sharing
-#if INCLUDE_CDS_JAVA_HEAP
- static inline oop read_string_from_compact_hashtable(address base_address, u4 index);
-
+ // AOT support
+ static inline oop read_string_from_compact_hashtable(address base_address, u4 index) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
private:
- static bool _is_two_dimensional_shared_strings_array;
- static OopHandle _shared_strings_array;
- static int _shared_strings_array_root_index;
-
- // All the shared strings are referenced through _shared_strings_array to keep them alive.
- // Each shared string is stored as a 32-bit index in ::_shared_table. The index
- // is interpreted in two ways:
- //
- // [1] _is_two_dimensional_shared_strings_array = false: _shared_strings_array is an Object[].
- // Each shared string is stored as _shared_strings_array[index]
- //
- // [2] _is_two_dimensional_shared_strings_array = true: _shared_strings_array is an Object[][]
- // This happens when there are too many elements in the shared table. We store them
- // using two levels of objArrays, such that none of the arrays are too big for
- // AOTMappedHeapWriter::is_too_large_to_archive(). In this case, the index is splited into two
- // parts. Each shared string is stored as _shared_strings_array[primary_index][secondary_index]:
- //
- // [bits 31 .. 14][ bits 13 .. 0 ]
- // primary_index secondary_index
- const static int _secondary_array_index_bits = 14;
- const static int _secondary_array_max_length = 1 << _secondary_array_index_bits;
- const static int _secondary_array_index_mask = _secondary_array_max_length - 1;
-
- // make sure _secondary_array_index_bits is not too big
- static void verify_secondary_array_index_bits() PRODUCT_RETURN;
-#endif // INCLUDE_CDS_JAVA_HEAP
-
- private:
static oop lookup_shared(const StringWrapper& name, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
- public:
+public:
static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
- static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
- static void load_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN;
- static oop init_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
+ static void init_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void write_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
- static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
+ static void move_shared_strings_into_runtime_table();
// Jcmd
static void dump(outputStream* st, bool verbose=false);
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index ec639a2b4d3..c49aa10fa0d 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
};
void SymbolTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take SuspendMark first to keep lock order and avoid deadlock.
+ NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 38dba1d3d5f..30f147b9ae7 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -190,9 +190,8 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
// effect (sic!) for external_name(), but instead of doing that, we opt to
// explicitly push the hashcode in here. This is signify the following block
// is IMPORTANT:
- if (klass->java_mirror() != nullptr) {
- klass->java_mirror()->identity_hash();
- }
+ assert(klass->java_mirror() != nullptr, "must be");
+ klass->java_mirror()->identity_hash();
if (!is_eligible_for_verification(klass, should_verify_class)) {
return true;
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 8388b98faae..79646f24d0e 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -245,10 +245,6 @@ class SerializeClosure;
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
- template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
- template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@@ -302,6 +298,7 @@ class SerializeClosure;
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
+ template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \
diff --git a/src/hotspot/share/code/aotCodeCache.cpp b/src/hotspot/share/code/aotCodeCache.cpp
index 0314c5227d2..f51c068f1e7 100644
--- a/src/hotspot/share/code/aotCodeCache.cpp
+++ b/src/hotspot/share/code/aotCodeCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1371,6 +1371,7 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
+ SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
SET_ADDRESS(_extrs, &ZPointerLoadShift);
diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp
index d90695739a1..dbfe1cd884e 100644
--- a/src/hotspot/share/code/dependencies.cpp
+++ b/src/hotspot/share/code/dependencies.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1124,7 +1124,7 @@ class AbstractClassHierarchyWalker {
Klass* find_witness(InstanceKlass* context_type, KlassDepChange* changes = nullptr);
static void init();
- static void print_statistics();
+ NOT_PRODUCT(static void print_statistics();)
};
PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_anywhere_calls_count = nullptr;
@@ -2277,6 +2277,7 @@ bool KlassDepChange::involves_context(Klass* k) {
return is_contained;
}
+#ifndef PRODUCT
void Dependencies::print_statistics() {
AbstractClassHierarchyWalker::print_statistics();
}
@@ -2302,6 +2303,7 @@ void AbstractClassHierarchyWalker::print_statistics() {
}
}
}
+#endif
CallSiteDepChange::CallSiteDepChange(Handle call_site, Handle method_handle) :
_call_site(call_site),
diff --git a/src/hotspot/share/code/dependencies.hpp b/src/hotspot/share/code/dependencies.hpp
index d11c51a66dc..582a08183f9 100644
--- a/src/hotspot/share/code/dependencies.hpp
+++ b/src/hotspot/share/code/dependencies.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -649,7 +649,7 @@ class Dependencies: public ResourceObj {
};
friend class Dependencies::DepStream;
- static void print_statistics();
+ NOT_PRODUCT(static void print_statistics();)
};
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 574f4d6543b..7b236ed3589 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2346,12 +2346,18 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
/* Repeat compilation without installing code for profiling purposes */
int repeat_compilation_count = directive->RepeatCompilationOption;
- while (repeat_compilation_count > 0) {
- ResourceMark rm(thread);
- task->print_ul("NO CODE INSTALLED");
- thread->timeout()->reset();
- comp->compile_method(&ci_env, target, osr_bci, false, directive);
- repeat_compilation_count--;
+ if (repeat_compilation_count > 0) {
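+      // Keep the failure reason from the real compilation, clear it before each
+      // profiling-only repeat, and restore it afterwards so the repeats do not
+      // overwrite the original result.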
+ CHeapStringHolder failure_reason;
+ failure_reason.set(ci_env._failure_reason.get());
+ while (repeat_compilation_count > 0) {
+ ResourceMark rm(thread);
+ task->print_ul("NO CODE INSTALLED");
+ thread->timeout()->reset();
+ ci_env._failure_reason.clear();
+ comp->compile_method(&ci_env, target, osr_bci, false, directive);
+ repeat_compilation_count--;
+ }
+ ci_env._failure_reason.set(failure_reason.get());
}
}
diff --git a/src/hotspot/share/compiler/disassembler.cpp b/src/hotspot/share/compiler/disassembler.cpp
index c79c15e0f32..2c1ef235e07 100644
--- a/src/hotspot/share/compiler/disassembler.cpp
+++ b/src/hotspot/share/compiler/disassembler.cpp
@@ -607,10 +607,27 @@ void decode_env::print_address(address adr) {
return;
}
+ address card_table_base = nullptr;
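+  // Determine the byte map base of the card table used by the active barrier set, if it has one.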
BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->is_a(BarrierSet::CardTableBarrierSet) &&
-      adr == ci_card_table_address_as<address>()) {
- st->print("word_map_base");
+#if INCLUDE_G1GC
+ if (bs->is_a(BarrierSet::G1BarrierSet)) {
+    G1BarrierSet* g1bs = barrier_set_cast<G1BarrierSet>(bs);
+ card_table_base = g1bs->card_table()->byte_map_base();
+ } else
+#endif
+#if INCLUDE_SHENANDOAHGC
+ if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
+    ShenandoahBarrierSet* sbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
+ if (sbs->card_table() != nullptr) {
+ card_table_base = sbs->card_table()->byte_map_base();
+ }
+ } else
+#endif
+ if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
+    card_table_base = ci_card_table_address_as<address>();
+ }
+ if (card_table_base != nullptr && adr == card_table_base) {
+ st->print("card_table_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
return;
}
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index 24182c22a23..59ab69b2427 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -62,8 +62,6 @@ jint EpsilonHeap::initialize() {
// Enable monitoring
_monitoring_support = new EpsilonMonitoringSupport(this);
- _last_counter_update = 0;
- _last_heap_print = 0;
// Install barrier set
BarrierSet::set_barrier_set(new EpsilonBarrierSet());
@@ -156,17 +154,17 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
// At this point, some diagnostic subsystems might not yet be initialized.
// We pretend the printout happened either way. This keeps allocation path
// from obsessively checking the subsystems' status on every allocation.
- size_t last_counter = AtomicAccess::load(&_last_counter_update);
+ size_t last_counter = _last_counter_update.load_relaxed();
if ((used - last_counter >= _step_counter_update) &&
- AtomicAccess::cmpxchg(&_last_counter_update, last_counter, used) == last_counter) {
+ _last_counter_update.compare_set(last_counter, used)) {
if (_monitoring_support->is_ready()) {
_monitoring_support->update_counters();
}
}
- size_t last_heap = AtomicAccess::load(&_last_heap_print);
+ size_t last_heap = _last_heap_print.load_relaxed();
if ((used - last_heap >= _step_heap_print) &&
- AtomicAccess::cmpxchg(&_last_heap_print, last_heap, used) == last_heap) {
+ _last_heap_print.compare_set(last_heap, used)) {
print_heap_info(used);
if (Metaspace::initialized()) {
print_metaspace_info();
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
index 9693c63b15c..8d7aa7960fd 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -31,6 +31,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "memory/virtualspace.hpp"
+#include "runtime/atomic.hpp"
#include "services/memoryManager.hpp"
class EpsilonHeap : public CollectedHeap {
@@ -45,8 +46,8 @@ private:
size_t _step_counter_update;
size_t _step_heap_print;
int64_t _decay_time_ns;
- volatile size_t _last_counter_update;
- volatile size_t _last_heap_print;
+  Atomic<size_t> _last_counter_update;
+  Atomic<size_t> _last_heap_print;
void print_tracing_info() const override;
void stop() override {};
diff --git a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp
index 38be736df74..213fc18b8ff 100644
--- a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp
@@ -96,7 +96,6 @@ public:
EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
_heap_counters = new EpsilonGenerationCounters(heap);
_space_counters = new EpsilonSpaceCounters("Heap", 0, heap->max_capacity(), 0, _heap_counters);
- _ready = false;
}
void EpsilonMonitoringSupport::update_counters() {
@@ -114,9 +113,9 @@ void EpsilonMonitoringSupport::update_counters() {
}
bool EpsilonMonitoringSupport::is_ready() {
- return AtomicAccess::load_acquire(&_ready);
+ return _ready.load_acquire();
}
void EpsilonMonitoringSupport::mark_ready() {
- return AtomicAccess::release_store(&_ready, true);
+ _ready.release_store(true);
}
diff --git a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp
index 76cdac6df1b..9dc52c2a659 100644
--- a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp
@@ -26,6 +26,7 @@
#define SHARE_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
class EpsilonGenerationCounters;
class EpsilonSpaceCounters;
 class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
private:
EpsilonGenerationCounters* _heap_counters;
EpsilonSpaceCounters* _space_counters;
- volatile bool _ready;
+  Atomic<bool> _ready;
public:
EpsilonMonitoringSupport(EpsilonHeap* heap);
diff --git a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
index 61402301eb1..34d31702e80 100644
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -351,7 +351,6 @@ Node* G1BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) co
Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
- GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}
@@ -361,7 +360,6 @@ Node* G1BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access
Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
- GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
@@ -370,7 +368,6 @@ Node* G1BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& acces
}
Node* G1BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
- GraphKit* kit = access.kit();
if (!access.is_oop()) {
return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
}
diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp
index 58e76cdd43a..ffb06a7d822 100644
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -209,6 +209,17 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}
+  // Do not interfere with GC-pressure-driven heap resizing unless the user has
+  // explicitly set these flags. G1 heap sizing should be free to grow or shrink
+  // the heap based on GC pressure, rather than being forced to satisfy
+  // MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
+ if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+ }
+ if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+ }
+
// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.cpp b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
index 622651ce0d8..dee50500e07 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,13 +64,13 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table,
{}
G1BarrierSet::~G1BarrierSet() {
- delete _refinement_table;
+ delete refinement_table();
}
void G1BarrierSet::swap_global_card_table() {
-  G1CardTable* temp = static_cast<G1CardTable*>(_card_table);
- _card_table = _refinement_table;
- _refinement_table = temp;
+  G1CardTable* temp = static_cast<G1CardTable*>(card_table());
+ _card_table.store_relaxed(refinement_table());
+ _refinement_table.store_relaxed(temp);
}
void G1BarrierSet::update_card_table_base(Thread* thread) {
@@ -80,7 +80,7 @@ void G1BarrierSet::update_card_table_base(Thread* thread) {
assert(thread->is_Java_thread(), "may only update card table base of JavaThreads, not %s", thread->name());
}
#endif
- G1ThreadLocalData::set_byte_map_base(thread, _card_table->byte_map_base());
+ G1ThreadLocalData::set_byte_map_base(thread, card_table()->byte_map_base());
}
 template <class T> void
@@ -135,10 +135,10 @@ void G1BarrierSet::write_region(MemRegion mr) {
// marks next time.
// If we write to the old card table (after the switching, then the refinement
// table) the oncoming handshake will do the memory synchronization.
- CardTable* card_table = AtomicAccess::load(&_card_table);
+ CardTable* local_card_table = card_table();
- volatile CardValue* byte = card_table->byte_for(mr.start());
- CardValue* last_byte = card_table->byte_for(mr.last());
+ volatile CardValue* byte = local_card_table->byte_for(mr.start());
+ CardValue* last_byte = local_card_table->byte_for(mr.last());
// Dirty cards only if necessary.
for (; byte <= last_byte; byte++) {
@@ -190,6 +190,6 @@ void G1BarrierSet::on_thread_detach(Thread* thread) {
}
void G1BarrierSet::print_on(outputStream* st) const {
- _card_table->print_on(st, "Card");
- _refinement_table->print_on(st, "Refinement");
+ card_table()->print_on(st, "Card");
+ refinement_table()->print_on(st, "Refinement");
}
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
index bf595973a32..406096acf10 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
+#include "runtime/atomic.hpp"
class G1CardTable;
class Thread;
@@ -66,7 +67,7 @@ class G1BarrierSet: public CardTableBarrierSet {
BufferNode::Allocator _satb_mark_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
- G1CardTable* _refinement_table;
+  Atomic<G1CardTable*> _refinement_table;
public:
G1BarrierSet(G1CardTable* card_table, G1CardTable* refinement_table);
@@ -76,7 +77,7 @@ class G1BarrierSet: public CardTableBarrierSet {
     return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
}
- G1CardTable* refinement_table() const { return _refinement_table; }
+ G1CardTable* refinement_table() const { return _refinement_table.load_relaxed(); }
// Swap the global card table references, without synchronization.
void swap_global_card_table();
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
index ee2c1450d9b..54892c9191d 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
 template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field) {
- volatile CardValue* byte = _card_table->byte_for(field);
+ // Make sure that the card table reference is read only once. Otherwise the compiler
+  // might reload that value in the two accesses below, which could cause writes to
+  // the wrong card table.
+ CardTable* local_card_table = card_table();
+ CardValue* byte = local_card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.cpp b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
index 57558301541..1f082153476 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.cpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@@ -40,7 +39,7 @@ const char* G1AbstractSubTask::name() const {
}
bool G1BatchedTask::try_claim_serial_task(int& task) {
- task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
+ task = _num_serial_tasks_done.fetch_then_add(1);
return task < _serial_tasks.length();
}
@@ -96,8 +95,8 @@ void G1BatchedTask::work(uint worker_id) {
}
G1BatchedTask::~G1BatchedTask() {
- assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
- "Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
+ assert(_num_serial_tasks_done.load_relaxed() >= _serial_tasks.length(),
+ "Only %d tasks of %d claimed", _num_serial_tasks_done.load_relaxed(), _serial_tasks.length());
for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.hpp b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
index 020fda634e4..a6d2ef923c0 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.hpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 template <typename E, MemTag MT>
class GrowableArrayCHeap;
@@ -120,7 +121,7 @@ public:
// 5) ~T()
//
class G1BatchedTask : public WorkerTask {
- volatile int _num_serial_tasks_done;
+  Atomic<int> _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;
bool try_claim_serial_task(int& task);
diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp
index 80cf5fea76a..60ad63e812c 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = 0;
- _coarsen_collision[i] = 0;
+ _coarsen_from[i].store_relaxed(0);
+ _coarsen_collision[i].store_relaxed(0);
}
}
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
- AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
+ _coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
if (collision) {
- AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
+ _coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
}
}
@@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ",
- _coarsen_from[0], _coarsen_collision[0],
- _coarsen_from[1], _coarsen_collision[1],
+ _coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
+ _coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
// There is no BitMap at the first level so we can't .
- _coarsen_from[3], _coarsen_collision[3],
- _coarsen_from[4], _coarsen_collision[4],
- _coarsen_from[5], _coarsen_collision[5],
- _coarsen_from[6], _coarsen_collision[6]
+ _coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
+ _coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
+ _coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
+ _coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
);
}
 class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
// the per region cardsets.
const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table?
- bool volatile _inserted_card;
+  Atomic<bool> _inserted_card;
G1CardSetMemoryManager* _mm;
CardSetHash _table;
@@ -311,10 +310,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
- if (!_inserted_card && inserted) {
+ if (!_inserted_card.load_relaxed() && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
- AtomicAccess::store(&_inserted_card, true);
+ _inserted_card.store_relaxed(true);
}
return found.value();
@@ -343,9 +342,9 @@ public:
}
void reset() {
- if (AtomicAccess::load(&_inserted_card)) {
+ if (_inserted_card.load_relaxed()) {
_table.unsafe_reset(InitialLogTableSize);
- AtomicAccess::store(&_inserted_card, false);
+ _inserted_card.store_relaxed(false);
}
}
@@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value);
}
-G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
+G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
// Update reference counts under RCU critical section to avoid a
   // use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
- ContainerPtr container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr container = container_addr->load_acquire();
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr;
- void coarsen_to_full(ContainerPtr* container_addr) {
+  void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
while (true) {
- ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr cur_container = container_addr->load_acquire();
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
- void operator ()(ContainerPtr* container_addr) {
+  void operator ()(Atomic<ContainerPtr>* container_addr) {
coarsen_to_full(container_addr);
}
};
@@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region);
- ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
+  Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
while (true) {
- if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
+ if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
+ howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
}
-G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
+G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
}
@@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container;
}
-bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
+bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region,
bool within_howl) {
@@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere();
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
- AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
+ _num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
}
}
@@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
   G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
- AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
+ howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
- AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
- AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
+ _num_occupied.add_then_fetch(diff, memory_order_relaxed);
}
}
-G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
+G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr container,
uint card_region,
uint card_in_region,
@@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
- AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
+ _num_occupied.add_then_fetch(1u, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();
@@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that
// case in the switch below.
@@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
st->print("FULL card set)");
return;
@@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value =
[&] (G1CardSetHashTableValue* value) {
- cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
+ cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
return true;
};
@@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
}
bool G1CardSet::is_empty() const {
- return _num_occupied == 0;
+ return _num_occupied.load_relaxed() == 0;
}
size_t G1CardSet::occupied() const {
- return _num_occupied;
+ return _num_occupied.load_relaxed();
}
size_t G1CardSet::num_containers() {
@@ -1024,10 +1023,6 @@ size_t G1CardSet::num_containers() {
return cl._count;
}
-G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
- return _coarsen_stats;
-}
-
void G1CardSet::print_coarsen_stats(outputStream* out) {
_last_coarsen_stats.subtract_from(_coarsen_stats);
@@ -1055,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() {
_table->reset();
- _num_occupied = 0;
+ _num_occupied.store_relaxed(0);
_mm->flush();
}
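A minimal standalone sketch of the access pattern this file now converges on, with std::atomic standing in for HotSpot's Atomic<T> wrapper; the names Container, CardSetSketch, coarsen and note_card_added are invented for the example and are not part of the patch:

// Illustrative only. Mirrors the coarsening CAS in coarsen_container() and the
// relaxed occupancy counter updates above.
#include <atomic>
#include <cstddef>

struct Container;                          // opaque stand-in for ContainerPtr

struct CardSetSketch {
  std::atomic<Container*> container{nullptr};
  std::atomic<size_t>     num_occupied{0};

  // Exactly one thread sees its compare-exchange succeed; that thread is the
  // one responsible for transferring cards from the old container.
  bool coarsen(Container* expected, Container* replacement) {
    return container.compare_exchange_strong(expected, replacement,
                                             std::memory_order_relaxed);
  }

  void note_card_added() {
    // Best-effort statistic, so a relaxed increment is sufficient.
    num_occupied.fetch_add(1, std::memory_order_relaxed);
  }
};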
diff --git a/src/hotspot/share/gc/g1/g1CardSet.hpp b/src/hotspot/share/gc/g1/g1CardSet.hpp
index 9cefc4b1c22..64ddf0ca6a4 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions;
@@ -154,8 +155,8 @@ public:
private:
// Indices are "from" indices.
- size_t _coarsen_from[NumCoarsenCategories];
- size_t _coarsen_collision[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_from[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
public:
G1CardSetCoarsenStats() { reset(); }
@@ -271,11 +272,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality.
- size_t _num_occupied;
+ Atomic<size_t> _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type);
- ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
+ ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
// Returns true if the card set container should be released
bool release_container(ContainerPtr container);
// Release card set and free if needed.
@@ -288,7 +289,7 @@ private:
// coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card).
- bool coarsen_container(ContainerPtr volatile* container_addr,
+ bool coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region, bool within_howl = false);
@@ -300,9 +301,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
- G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
+ G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
- G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
+ G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@@ -366,7 +367,6 @@ public:
size_t num_containers();
- static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes.
@@ -412,8 +412,15 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx;
- uint volatile _num_occupied;
- ContainerPtr volatile _container;
+ Atomic<uint> _num_occupied;
+ Atomic<ContainerPtr> _container;
+
+ // Copy constructor needed for use in ConcurrentHashTable.
+ G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
+ _region_idx(other._region_idx),
+ _num_occupied(other._num_occupied.load_relaxed()),
+ _container(other._container.load_relaxed())
+ { }
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
};
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
index 72c7795be2e..78551479e06 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr;
- ContainerPtr volatile * _value_addr;
+ Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value;
static const uint SizeFieldLen = 3;
@@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}
- G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
+ G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}
@@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
- uintptr_t _ref_count;
+ Atomic<uintptr_t> _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }
- uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
+ uintptr_t refcount() const { return _ref_count.load_acquire(); }
bool try_increment_refcount();
@@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
// VLA implementation.
EntryDataType _data[1];
@@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj {
- EntryCountType volatile* _num_entries_addr;
+ Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries;
public:
- G1CardSetArrayLocker(EntryCountType volatile* value);
+ G1CardSetArrayLocker(Atomic<EntryCountType>* value);
EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
- AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
+ _num_entries_addr->release_store(_local_num_entries);
}
};
@@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);
- size_t num_entries() const { return _num_entries & EntryMask; }
+ size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
static size_t header_size_in_bytes();
@@ -223,7 +223,7 @@ public:
};
class G1CardSetBitMap : public G1CardSetContainer {
- size_t _num_bits_set;
+ Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1];
public:
@@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}
- uint num_bits_set() const { return (uint)_num_bits_set; }
+ uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
private:
// VLA implementation.
- ContainerPtr _buckets[1];
+ Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point.
// Iterates over the given ContainerPtr with at index in this Howl card set,
@@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const;
- ContainerPtr const* buckets() const;
+ Atomic<ContainerPtr> const* buckets() const;
public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
- ContainerPtr const* container_addr(EntryCountType index) const;
+ Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
- ContainerPtr* container_addr(EntryCountType index);
+ Atomic<ContainerPtr>* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor
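The G1CardSetArrayLocker above folds a lock bit into the same word as the entry count, so readers can keep using plain loads while a writer briefly locks the array. A standalone approximation, using std::atomic instead of Atomic<EntryCountType>; the LockBit/EntryMask values and the class name are assumptions of the sketch, and the busy-wait omits the SpinYield the real code uses:

#include <atomic>
#include <cstdint>

class CounterLockSketch {
  static const uint32_t LockBit   = 1u << 31;
  static const uint32_t EntryMask = LockBit - 1;

  std::atomic<uint32_t>& _word;
  uint32_t _local_entries;

public:
  explicit CounterLockSketch(std::atomic<uint32_t>& word) : _word(word) {
    uint32_t entries = _word.load(std::memory_order_relaxed) & EntryMask;
    // Spin until the CAS installs the lock bit over an unlocked count.
    while (!_word.compare_exchange_weak(entries, entries | LockBit)) {
      entries &= EntryMask;   // retry with the freshly observed count
    }
    _local_entries = entries;
  }

  void inc_entries() { ++_local_entries; }

  ~CounterLockSketch() {
    // Publish the possibly-updated count and drop the lock bit in one store.
    _word.store(_local_entries, std::memory_order_release);
  }
};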
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
index 1958309f517..3c6fb9d1a02 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
- ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
+ ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
- uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
+ uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) {
return true;
}
@@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
- return AtomicAccess::sub(&_ref_count, 2u);
+ return _ref_count.sub_then_fetch(2u);
}
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}
-inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
+inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
- EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
+ EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
while (true) {
- EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
- num_entries,
- (EntryCountType)(num_entries | LockBitMask));
+ EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
+ (EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
@@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+ assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index;
}
@@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
- if (_num_bits_set >= threshold) {
+ if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
- AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
+ _num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added;
}
return Found;
@@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
+ assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index;
}
-inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
- return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
+inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
+ return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
- return *container_addr(index);
+ return (*container_addr(index)).load_relaxed();
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
- return reinterpret_cast<ContainerPtr const*>(ptr);
+ return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
}
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
- *container_addr(i) = G1CardSetInlinePtr();
+ container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
- ContainerPtr* array_entry = container_addr(bucket);
- ContainerPtr container = AtomicAccess::load_acquire(array_entry);
+ Atomic<ContainerPtr>* array_entry = container_addr(bucket);
+ ContainerPtr container = array_entry->load_acquire();
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {
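The try_increment_refcount()/decrement_refcount() pair converted above keeps the reference count in steps of two, with the low bit marking a still-valid container, so an increment can fail permanently once the container is on its way out. A standalone sketch of that idea, with std::atomic in place of Atomic<uintptr_t>; the struct name is hypothetical:

#include <atomic>
#include <cstdint>

struct RefCountSketch {
  std::atomic<uintptr_t> ref_count{3};     // odd: valid; initial owner counted

  bool try_increment() {
    uintptr_t old_value = ref_count.load(std::memory_order_acquire);
    while ((old_value & 0x1) != 0) {       // only while the container is valid
      if (ref_count.compare_exchange_weak(old_value, old_value + 2)) {
        return true;
      }
      // old_value was refreshed by the failed CAS; re-check validity and retry.
    }
    return false;                          // container already being freed
  }

  uintptr_t decrement() {
    return ref_count.fetch_sub(2) - 2;     // new value, like sub_then_fetch(2u)
  }
};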
diff --git a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
index d13a6fe2dca..60602ef942b 100644
--- a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
index e0cadbdd907..d8cabaa00a4 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {
void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
- _card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
+ _card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}
void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = 0;
+ _card_claims[i].store_relaxed(0);
}
}
void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
+ _card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
index 4f524b83f97..822ef45c722 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
class G1HeapRegionClosure;
@@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {
// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
- uint volatile* _card_claims;
+ Atomic<uint>* _card_claims;
uint _cards_per_chunk; // For conversion between card index and chunk index.
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
index d682f0d17ae..35b2484982c 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,26 +29,25 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "runtime/atomicAccess.hpp"
bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
+ return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}
void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- AtomicAccess::store(&_card_claims[region], 0u);
+ _card_claims[region].store_relaxed(0u);
}
uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_all_cards(uint region) {
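claim_cards() and claim_chunk() above hand out card ranges per region with a relaxed fetch-and-add, so parallel workers never process the same chunk twice. A self-contained sketch of that claiming loop using std::atomic; CardsPerRegion and cards_per_chunk are made-up values for the example, not the real constants:

#include <atomic>
#include <cstdio>

int main() {
  const unsigned CardsPerRegion  = 512;   // assumption for the sketch
  const unsigned cards_per_chunk = 128;   // assumption for the sketch
  std::atomic<unsigned> claim{0};

  // Returns the first card of a freshly claimed chunk, or a value
  // >= CardsPerRegion once the region has been fully claimed.
  auto claim_chunk = [&]() {
    return claim.fetch_add(cards_per_chunk, std::memory_order_relaxed);
  };

  for (unsigned start = claim_chunk(); start < CardsPerRegion; start = claim_chunk()) {
    std::printf("claimed cards [%u, %u)\n", start, start + cards_per_chunk);
  }
  return 0;
}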
diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
index 60ad3a2af32..ca4487876b9 100644
--- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;
- size_t volatile _num_entries;
+ Atomic<size_t> _num_entries;
bool is_empty() const { return number_of_entries() == 0; }
@@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
- AtomicAccess::inc(&_num_entries);
+ _num_entries.add_then_fetch(1u);
}
if (grow_hint) {
_table.grow(Thread::current());
@@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
- AtomicAccess::dec(&_num_entries);
+ _num_entries.sub_then_fetch(1u);
}
return removed;
}
@@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
- size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
+ size_t current_size = _num_entries.sub_then_fetch(num_deleted);
shrink_to_match(current_size);
}
}
@@ -226,7 +226,7 @@ public:
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
- size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
+ size_t number_of_entries() const { return _num_entries.load_relaxed(); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 2ad5a26d5e6..9424a804bd8 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -103,7 +103,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@@ -526,7 +525,7 @@ void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func
}
}
-HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size, HeapWord* preferred_addr) {
+HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
MutexLocker x(Heap_lock);
@@ -687,8 +686,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
// Only try that if we can actually perform a GC.
- if (is_init_completed() && policy()->need_to_start_conc_mark("concurrent humongous allocation",
- word_size)) {
+ if (is_init_completed() &&
+ policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
}
@@ -1321,7 +1320,6 @@ G1CollectedHeap::G1CollectedHeap() :
_card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
_young_regions_cset_group(card_set_config(), &_card_set_freelist_pool, G1CSetCandidateGroup::YoungRegionId),
_cm(nullptr),
- _cm_thread(nullptr),
_cr(nullptr),
_task_queues(nullptr),
_partial_array_state_manager(nullptr),
@@ -1565,7 +1563,6 @@ jint G1CollectedHeap::initialize() {
// Create the G1ConcurrentMark data structure and thread.
// (Must do this late, so that "max_[reserved_]regions" is defined.)
_cm = new G1ConcurrentMark(this, bitmap_storage);
- _cm_thread = _cm->cm_thread();
// Now expand into the initial heap size.
if (!expand(init_byte_size, _workers)) {
@@ -1637,7 +1634,9 @@ jint G1CollectedHeap::initialize() {
}
bool G1CollectedHeap::concurrent_mark_is_terminating() const {
- return _cm_thread->should_terminate();
+ assert(_cm != nullptr, "_cm must have been created");
+ assert(_cm->is_fully_initialized(), "thread must exist in order to check if mark is terminating");
+ return _cm->cm_thread()->should_terminate();
}
void G1CollectedHeap::stop() {
@@ -1646,7 +1645,9 @@ void G1CollectedHeap::stop() {
// that are destroyed during shutdown.
_cr->stop();
_service_thread->stop();
- _cm_thread->stop();
+ if (_cm->is_fully_initialized()) {
+ _cm->cm_thread()->stop();
+ }
}
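The guards added around _cm above make shutdown, thread iteration and the marking entry points tolerate a G1ConcurrentMark whose thread is only created lazily on the first pause. The pattern in isolation, as a sketch with hypothetical names (not the HotSpot classes):

class MarkerSketch {
  class WorkerThread { public: void stop() {} };   // stand-in for the CM thread
  WorkerThread* _thread = nullptr;                  // created on first GC pause

public:
  bool is_fully_initialized() const { return _thread != nullptr; }

  void fully_initialize() {
    if (is_fully_initialized()) {
      return;                      // idempotent; safe to call on every pause
    }
    _thread = new WorkerThread();
  }

  void stop() {
    // Only touch the thread if it was ever created.
    if (is_fully_initialized()) {
      _thread->stop();
    }
  }
};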
void G1CollectedHeap::safepoint_synchronize_begin() {
@@ -1843,7 +1844,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
// is set) so that if a waiter requests another System.gc() it doesn't
// incorrectly see that a marking cycle is still in progress.
if (concurrent) {
- _cm_thread->set_idle();
+ _cm->cm_thread()->set_idle();
}
// Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
@@ -2422,7 +2423,6 @@ void G1CollectedHeap::print_gc_on(outputStream* st) const {
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
workers()->threads_do(tc);
- tc->do_thread(_cm_thread);
_cm->threads_do(tc);
_cr->threads_do(tc);
tc->do_thread(_service_thread);
@@ -2543,15 +2543,15 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
}
void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_mark) {
- assert(!_cm_thread->in_progress(), "Can not start concurrent operation while in progress");
-
+ assert(_cm->is_fully_initialized(), "sanity");
+ assert(!_cm->in_progress(), "Can not start concurrent operation while in progress");
MutexLocker x(G1CGC_lock, Mutex::_no_safepoint_check_flag);
if (concurrent_operation_is_full_mark) {
_cm->post_concurrent_mark_start();
- _cm_thread->start_full_mark();
+ _cm->cm_thread()->start_full_mark();
} else {
_cm->post_concurrent_undo_start();
- _cm_thread->start_undo_mark();
+ _cm->cm_thread()->start_undo_mark();
}
G1CGC_lock->notify();
}
@@ -2727,6 +2727,8 @@ void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_si
_bytes_used_during_gc = 0;
+ _cm->fully_initialize();
+
policy()->decide_on_concurrent_start_pause();
// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index aff7166d391..8ff9d481000 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;
- volatile uint _cur_claim;
+ Atomic<uint> _cur_claim;
// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there
@@ -735,12 +736,10 @@ public:
void iterate_regions_in_range(MemRegion range, const Func& func);
// Commit the required number of G1 region(s) according to the size requested
- // and mark them as 'old' region(s). Preferred address is treated as a hint for
- // the location of the archive space in the heap. The returned address may or may
- // not be same as the preferred address.
+ // and mark them as 'old' region(s).
// This API is only used for allocating heap space for the archived heap objects
// in the CDS archive.
- HeapWord* alloc_archive_region(size_t word_size, HeapWord* preferred_addr);
+ HeapWord* alloc_archive_region(size_t word_size);
// Populate the G1BlockOffsetTable for archived regions with the given
// memory range.
@@ -824,7 +823,6 @@ public:
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
- G1ConcurrentMarkThread* _cm_thread;
// The concurrent refiner.
G1ConcurrentRefine* _cr;
@@ -1267,7 +1265,6 @@ public:
bool is_marked(oop obj) const;
- inline static bool is_obj_filler(const oop obj);
// Determine if an object is dead, given the object and also
// the region to which the object belongs.
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index abd61e72d57..8782b65b6f9 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
@@ -38,10 +39,10 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
+#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -53,10 +54,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
- if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
+ if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
- uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
+ uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}
@@ -230,16 +231,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}
-inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
- Klass* k = obj->klass_without_asserts();
- return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
-}
-
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
if (hr->is_in_parsable_area(obj)) {
// This object is in the parsable part of the heap, live unless scrubbed.
- return is_obj_filler(obj);
+ return is_filler_object(obj);
} else {
// From Remark until a region has been concurrently scrubbed, parts of the
// region is not guaranteed to be parsable. Use the bitmap for liveness.
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
index 954ca40a77f..e7bab32129e 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"
// Determine collection set candidates (from marking): For all regions determine
@@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1HeapRegion** _data;
- uint volatile _cur_claim_idx;
+ Atomic<uint> _cur_claim_idx;
static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
- uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
+ uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
void sort_by_gc_efficiency() {
- if (_cur_claim_idx == 0) {
+ uint length = _cur_claim_idx.load_relaxed();
+ if (length == 0) {
return;
}
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
- qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}
@@ -202,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer;
- uint volatile _num_regions_added;
+ Atomic<uint> _num_regions_added;
G1BuildCandidateArray _result;
void update_totals(uint num_regions) {
if (num_regions > 0) {
- AtomicAccess::add(&_num_regions_added, num_regions);
+ _num_regions_added.add_then_fetch(num_regions);
}
}
@@ -220,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
- uint num_candidates = AtomicAccess::load(&_num_regions_added);
+ uint num_candidates = _num_regions_added.load_relaxed();
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@@ -253,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);
- AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
+ _num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
}
public:
@@ -274,7 +275,7 @@ public:
_result.sort_by_gc_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
- _num_regions_added);
+ _num_regions_added.load_relaxed());
}
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 456d543fa10..8f3cafe1f5b 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/partialArraySplitter.inline.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
@@ -67,7 +70,6 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -76,6 +78,7 @@
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
+#include "utilities/checkedCast.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -99,7 +102,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
// We move that task's local finger along.
_task->move_finger_to(addr);
- _task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr)));
+ _task->process_entry(G1TaskQueueEntry(cast_to_oop(addr)), false /* stolen */);
// we only partially drain the local queue and global stack
_task->drain_local_queue(true);
_task->drain_global_stack(true);
@@ -148,25 +151,25 @@ bool G1CMMarkStack::initialize() {
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
- if (_size >= _max_capacity) {
+ if (_size.load_relaxed() >= _max_capacity) {
return nullptr;
}
- size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
+ size_t cur_idx = _size.fetch_then_add(1u);
if (cur_idx >= _max_capacity) {
return nullptr;
}
size_t bucket = get_bucket(cur_idx);
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}
MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@@ -175,7 +178,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
}
size_t bucket_idx = get_bucket_index(cur_idx);
- TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
+ TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
result->next = nullptr;
return result;
}
@@ -197,10 +200,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
_max_capacity = max_capacity;
_num_buckets = get_bucket(_max_capacity) + 1;
- _buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
+ _buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);
for (size_t i = 0; i < _num_buckets; i++) {
- _buckets[i] = nullptr;
+ _buckets[i].store_relaxed(nullptr);
}
size_t new_capacity = bucket_size(0);
@@ -240,9 +243,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
}
for (size_t i = 0; i < _num_buckets; i++) {
- if (_buckets[i] != nullptr) {
- MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
- _buckets[i] = nullptr;
+ if (_buckets[i].load_relaxed() != nullptr) {
+ MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
+ _buckets[i].store_relaxed(nullptr);
}
}
@@ -259,7 +262,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
- if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
+ if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}
@@ -279,7 +282,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
- AtomicAccess::release_store(&_buckets[i], bucket_base);
+ _buckets[i].release_store(bucket_base);
}
return true;
}
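The ChunkAllocator hunks above map chunk indices onto a small array of doubling-size buckets, so the mark stack can grow without relocating chunks that are already in use. A self-contained sketch of that index arithmetic; the initial bucket size and the helper names are assumptions of the example, not the allocator's actual values:

#include <cstddef>
#include <cstdio>
#include <initializer_list>

static const size_t Bucket0Size = 64;   // hypothetical size of bucket 0

// Bucket b holds indices [Bucket0Size * (2^b - 1), Bucket0Size * (2^(b+1) - 1)).
static size_t bucket_of(size_t idx) {
  size_t bucket = 0;
  size_t capacity = Bucket0Size;
  while (idx >= capacity) {
    capacity += Bucket0Size << (bucket + 1);  // next bucket is twice as large
    bucket++;
  }
  return bucket;
}

static size_t index_in_bucket(size_t idx) {
  size_t before = Bucket0Size * ((size_t(1) << bucket_of(idx)) - 1);
  return idx - before;
}

int main() {
  for (size_t i : {0u, 63u, 64u, 191u, 192u}) {
    std::printf("chunk %zu -> bucket %zu, slot %zu\n",
                i, bucket_of(i), index_in_bucket(i));
  }
  return 0;
}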
@@ -288,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand();
}
-void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
- elem->next = *list;
- *list = elem;
+void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
+ elem->next = list->load_relaxed();
+ list->store_relaxed(elem);
}
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@@ -304,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem);
}
-G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
- TaskQueueEntryChunk* result = *list;
+G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
+ TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) {
- *list = (*list)->next;
+ list->store_relaxed(list->load_relaxed()->next);
}
return result;
}
@@ -361,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
- _chunk_list = nullptr;
- _free_list = nullptr;
+ _chunk_list.store_relaxed(nullptr);
+ _free_list.store_relaxed(nullptr);
_chunk_allocator.reset();
}
@@ -379,12 +382,12 @@ G1CMRootMemRegions::~G1CMRootMemRegions() {
}
void G1CMRootMemRegions::reset() {
- _num_root_regions = 0;
+ _num_root_regions.store_relaxed(0);
}
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
- size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u);
+ size_t idx = _num_root_regions.fetch_then_add(1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions);
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -395,36 +398,38 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
void G1CMRootMemRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");
- _scan_in_progress = _num_root_regions > 0;
+ _scan_in_progress.store_relaxed(num_root_regions() > 0);
- _claimed_root_regions = 0;
- _should_abort = false;
+ _claimed_root_regions.store_relaxed(0);
+ _should_abort.store_relaxed(false);
}
const MemRegion* G1CMRootMemRegions::claim_next() {
- if (_should_abort) {
+ if (_should_abort.load_relaxed()) {
// If someone has set the should_abort flag, we return null to
// force the caller to bail out of their loop.
return nullptr;
}
- if (_claimed_root_regions >= _num_root_regions) {
+ uint local_num_root_regions = num_root_regions();
+ if (_claimed_root_regions.load_relaxed() >= local_num_root_regions) {
return nullptr;
}
- size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u);
- if (claimed_index < _num_root_regions) {
+ size_t claimed_index = _claimed_root_regions.fetch_then_add(1u);
+ if (claimed_index < local_num_root_regions) {
return &_root_regions[claimed_index];
}
return nullptr;
}
uint G1CMRootMemRegions::num_root_regions() const {
- return (uint)_num_root_regions;
+ return (uint)_num_root_regions.load_relaxed();
}
bool G1CMRootMemRegions::contains(const MemRegion mr) const {
- for (uint i = 0; i < _num_root_regions; i++) {
+ uint local_num_root_regions = num_root_regions();
+ for (uint i = 0; i < local_num_root_regions; i++) {
if (_root_regions[i].equals(mr)) {
return true;
}
@@ -434,7 +439,7 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const {
void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
- _scan_in_progress = false;
+ _scan_in_progress.store_relaxed(false);
G1RootRegionScan_lock->notify_all();
}
@@ -445,10 +450,10 @@ void G1CMRootMemRegions::cancel_scan() {
void G1CMRootMemRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");
- if (!_should_abort) {
- assert(_claimed_root_regions >= num_root_regions(),
+ if (!_should_abort.load_relaxed()) {
+ assert(_claimed_root_regions.load_relaxed() >= num_root_regions(),
"we should have claimed all root regions, claimed %zu, length = %u",
- _claimed_root_regions, num_root_regions());
+ _claimed_root_regions.load_relaxed(), num_root_regions());
}
notify_scan_done();
@@ -470,7 +475,7 @@ bool G1CMRootMemRegions::wait_until_scan_finished() {
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* bitmap_storage) :
- // _cm_thread set inside the constructor
+ _cm_thread(nullptr),
_g1h(g1h),
_mark_bitmap(),
@@ -481,15 +486,15 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_global_mark_stack(),
- // _finger set in set_non_marking_state
+ _finger(nullptr), // _finger set in set_non_marking_state
_worker_id_offset(G1ConcRefinementThreads), // The refinement control thread does not refine cards, so it's just the worker threads.
_max_num_tasks(MAX2(ConcGCThreads, ParallelGCThreads)),
- // _num_active_tasks set in set_non_marking_state()
- // _tasks set inside the constructor
-
+ _num_active_tasks(0), // _num_active_tasks set in set_non_marking_state()
+ _tasks(nullptr), // _tasks set inside late_init()
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
_terminator(_max_num_tasks, _task_queues),
+ _partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
_first_overflow_barrier_sync(),
_second_overflow_barrier_sync(),
@@ -521,6 +526,12 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
_mark_bitmap.initialize(g1h->reserved(), bitmap_storage);
+}
+
+void G1ConcurrentMark::fully_initialize() {
+ if (is_fully_initialized()) {
+ return;
+ }
// Create & start ConcurrentMark thread.
_cm_thread = new G1ConcurrentMarkThread(this);
@@ -556,6 +567,14 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
reset_at_marking_complete();
}
+bool G1ConcurrentMark::in_progress() const {
+ return is_fully_initialized() ? _cm_thread->in_progress() : false;
+}
+
+PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
+ return _partial_array_state_manager;
+}
+
void G1ConcurrentMark::reset() {
_has_aborted = false;
@@ -620,8 +639,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
_finger = _heap.start();
for (uint i = 0; i < _max_num_tasks; ++i) {
- G1CMTaskQueue* queue = _task_queues->queue(i);
- queue->set_empty();
+ _tasks[i]->reset_for_restart();
}
}
@@ -650,7 +668,26 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurr
}
}
+#if TASKQUEUE_STATS
+void G1ConcurrentMark::print_and_reset_taskqueue_stats() {
+
+ _task_queues->print_and_reset_taskqueue_stats("G1ConcurrentMark Oop Queue");
+
+ auto get_pa_stats = [&](uint i) {
+ return _tasks[i]->partial_array_task_stats();
+ };
+
+ PartialArrayTaskStats::log_set(_max_num_tasks, get_pa_stats,
+ "G1ConcurrentMark Partial Array Task Stats");
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ get_pa_stats(i)->reset();
+ }
+}
+#endif
+
void G1ConcurrentMark::reset_at_marking_complete() {
+ TASKQUEUE_STATS_ONLY(print_and_reset_taskqueue_stats());
// We set the global marking state to some default values when we're
// not doing marking.
reset_marking_for_restart();
@@ -738,7 +775,7 @@ private:
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
- assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
+ assert(!suspendible() || _cm->in_progress(), "invariant");
assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
// Abort iteration if necessary.
@@ -794,7 +831,8 @@ void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) {
void G1ConcurrentMark::cleanup_for_next_mark() {
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
- guarantee(cm_thread()->in_progress(), "invariant");
+ guarantee(is_fully_initialized(), "should be initialized");
+ guarantee(in_progress(), "invariant");
// We are finishing up the current cycle by clearing the next
// marking bitmap and getting it ready for the next cycle. During
@@ -804,11 +842,26 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
clear_bitmap(_concurrent_workers, true);
+ reset_partial_array_state_manager();
+
// Repeat the asserts from above.
- guarantee(cm_thread()->in_progress(), "invariant");
+ guarantee(is_fully_initialized(), "should be initialized");
+ guarantee(in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
+void G1ConcurrentMark::reset_partial_array_state_manager() {
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->unregister_partial_array_splitter();
+ }
+
+ partial_array_state_manager()->reset();
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->register_partial_array_splitter();
+ }
+}
+
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
assert_at_safepoint_on_vm_thread();
// To avoid fragmentation the full collection requesting to clear the bitmap
@@ -1789,17 +1842,18 @@ public:
{ }
void operator()(G1TaskQueueEntry task_entry) const {
- if (task_entry.is_array_slice()) {
- guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
+ if (task_entry.is_partial_array_state()) {
+ oop obj = task_entry.to_partial_array_state()->source();
+ guarantee(_g1h->is_in_reserved(obj), "Partial Array " PTR_FORMAT " must be in heap.", p2i(obj));
return;
}
- guarantee(oopDesc::is_oop(task_entry.obj()),
+ guarantee(oopDesc::is_oop(task_entry.to_oop()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
- p2i(task_entry.obj()), _phase, _info);
- G1HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
+ p2i(task_entry.to_oop()), _phase, _info);
+ G1HeapRegion* r = _g1h->heap_region_containing(task_entry.to_oop());
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
- p2i(task_entry.obj()), _phase, _info, r->hrm_index());
+ p2i(task_entry.to_oop()), _phase, _info, r->hrm_index());
}
};
@@ -1883,15 +1937,12 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
// nothing, but this situation should be extremely rare (a full gc after shutdown
// has been signalled is already rare), and this work should be negligible compared
// to actual full gc work.
- if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) {
+
+ if (!is_fully_initialized() || (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating())) {
return false;
}
- // Empty mark stack
reset_marking_for_restart();
- for (uint i = 0; i < _max_num_tasks; ++i) {
- _tasks[i]->clear_region_fields();
- }
abort_marking_threads();
@@ -1945,6 +1996,10 @@ void G1ConcurrentMark::print_summary_info() {
}
log.trace(" Concurrent marking:");
+ if (!is_fully_initialized()) {
+ log.trace(" has not been initialized yet");
+ return;
+ }
print_ms_time_info(" ", "remarks", _remark_times);
{
print_ms_time_info(" ", "final marks", _remark_mark_times);
@@ -1961,7 +2016,10 @@ void G1ConcurrentMark::print_summary_info() {
}
void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
- _concurrent_workers->threads_do(tc);
+ if (is_fully_initialized()) { // they are initialized late
+ tc->do_thread(_cm_thread);
+ _concurrent_workers->threads_do(tc);
+ }
}
void G1ConcurrentMark::print_on(outputStream* st) const {
@@ -2055,6 +2113,24 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
_mark_stats_cache.reset();
}
+void G1CMTask::reset_for_restart() {
+ clear_region_fields();
+ _task_queue->set_empty();
+ TASKQUEUE_STATS_ONLY(_partial_array_splitter.stats()->reset());
+ TASKQUEUE_STATS_ONLY(_task_queue->stats.reset());
+}
+
+void G1CMTask::register_partial_array_splitter() {
+
+ ::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(),
+ _cm->max_num_tasks(),
+ ObjArrayMarkingStride);
+}
+
+void G1CMTask::unregister_partial_array_splitter() {
+ _partial_array_splitter.~PartialArraySplitter();
+}
+
bool G1CMTask::should_exit_termination() {
if (!regular_clock_call()) {
return true;
@@ -2185,7 +2261,7 @@ bool G1CMTask::get_entries_from_global_stack() {
if (task_entry.is_null()) {
break;
}
- assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
+ assert(task_entry.is_partial_array_state() || oopDesc::is_oop(task_entry.to_oop()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.to_oop()));
bool success = _task_queue->push(task_entry);
// We only call this when the local queue is empty or under a
// given target limit. So, we do not expect this push to fail.
@@ -2216,7 +2292,7 @@ void G1CMTask::drain_local_queue(bool partially) {
G1TaskQueueEntry entry;
bool ret = _task_queue->pop_local(entry);
while (ret) {
- scan_task_entry(entry);
+ process_entry(entry, false /* stolen */);
if (_task_queue->size() <= target_size || has_aborted()) {
ret = false;
} else {
@@ -2226,6 +2302,37 @@ void G1CMTask::drain_local_queue(bool partially) {
}
}
+size_t G1CMTask::start_partial_array_processing(oop obj) {
+ assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
+
+ objArrayOop obj_array = objArrayOop(obj);
+ size_t array_length = obj_array->length();
+
+ size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
+
+ // Mark objArray klass metadata
+ if (_cm_oop_closure->do_metadata()) {
+ _cm_oop_closure->do_klass(obj_array->klass());
+ }
+
+ process_array_chunk(obj_array, 0, initial_chunk_size);
+
+ // Include object header size
+ return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
+}
+
+size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
+ PartialArrayState* state = task.to_partial_array_state();
+ // Access state before release by claim().
+ objArrayOop obj = objArrayOop(state->source());
+
+ PartialArraySplitter::Claim claim =
+ _partial_array_splitter.claim(state, _task_queue, stolen);
+
+ process_array_chunk(obj, claim._start, claim._end);
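+ // Convert the scanned oop slots of this chunk to heap words.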
+ return heap_word_size((claim._end - claim._start) * heapOopSize);
+}
+
void G1CMTask::drain_global_stack(bool partially) {
if (has_aborted()) {
return;
@@ -2430,7 +2537,7 @@ void G1CMTask::attempt_stealing() {
while (!has_aborted()) {
G1TaskQueueEntry entry;
if (_cm->try_stealing(_worker_id, entry)) {
- scan_task_entry(entry);
+ process_entry(entry, true /* stolen */);
// And since we're towards the end, let's totally drain the
// local queue and global stack.
@@ -2759,12 +2866,12 @@ G1CMTask::G1CMTask(uint worker_id,
G1ConcurrentMark* cm,
G1CMTaskQueue* task_queue,
G1RegionMarkStats* mark_stats) :
- _objArray_processor(this),
_worker_id(worker_id),
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_mark_bitmap(nullptr),
_task_queue(task_queue),
+ _partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks(), ObjArrayMarkingStride),
_mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
_calls(0),
_time_target_ms(0.0),
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 752082ce629..0271e6a4208 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,20 @@
#define SHARE_GC_G1_G1CONCURRENTMARK_HPP
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/shared/gcCause.hpp"
+#include "gc/shared/partialArraySplitter.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/numberSeq.hpp"
@@ -53,41 +56,7 @@ class G1RegionToSpaceMapper;
class G1SurvivorRegions;
class ThreadClosure;
-// This is a container class for either an oop or a continuation address for
-// mark stack entries. Both are pushed onto the mark stack.
-class G1TaskQueueEntry {
-private:
- void* _holder;
-
- static const uintptr_t ArraySliceBit = 1;
-
- G1TaskQueueEntry(oop obj) : _holder(obj) {
- assert(_holder != nullptr, "Not allowed to set null task queue element");
- }
- G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
-public:
-
- G1TaskQueueEntry() : _holder(nullptr) { }
- // Trivially copyable, for use in GenericTaskQueue.
-
- static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
- static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
-
- oop obj() const {
- assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
- return cast_to_oop(_holder);
- }
-
- HeapWord* slice() const {
- assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
- return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
- }
-
- bool is_oop() const { return !is_array_slice(); }
- bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
- bool is_null() const { return _holder == nullptr; }
-};
-
+typedef ScannerTask G1TaskQueueEntry;
typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
@@ -172,9 +141,9 @@ private:
size_t _capacity;
size_t _num_buckets;
bool _should_grow;
- TaskQueueEntryChunk* volatile* _buckets;
+ Atomic<TaskQueueEntryChunk*>* _buckets;
char _pad0[DEFAULT_PADDING_SIZE];
- volatile size_t _size;
+ Atomic<size_t> _size;
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];
size_t bucket_size(size_t bucket) {
@@ -212,7 +181,7 @@ private:
bool initialize(size_t initial_capacity, size_t max_capacity);
void reset() {
- _size = 0;
+ _size.store_relaxed(0);
_should_grow = false;
}
@@ -241,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator;
char _pad0[DEFAULT_PADDING_SIZE];
- TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
+ Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
- TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
+ Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// Atomically add the given chunk to the list.
- void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
+ void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
- TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
+ TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@@ -283,7 +252,7 @@ private:
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
- bool is_empty() const { return _chunk_list == nullptr; }
+ bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
size_t capacity() const { return _chunk_allocator.capacity(); }
@@ -321,12 +290,12 @@ class G1CMRootMemRegions {
MemRegion* _root_regions;
size_t const _max_regions;
- volatile size_t _num_root_regions; // Actual number of root regions.
+ Atomic<size_t> _num_root_regions; // Actual number of root regions.
- volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
+ Atomic<size_t> _claimed_root_regions; // Number of root regions currently claimed.
- volatile bool _scan_in_progress;
- volatile bool _should_abort;
+ Atomic<bool> _scan_in_progress;
+ Atomic<bool> _should_abort;
void notify_scan_done();
@@ -343,11 +312,11 @@ public:
void prepare_for_scan();
// Forces get_next() to return null so that the iteration aborts early.
- void abort() { _should_abort = true; }
+ void abort() { _should_abort.store_relaxed(true); }
// Return true if the CM thread is actively scanning root regions,
// false otherwise.
- bool scan_in_progress() { return _scan_in_progress; }
+ bool scan_in_progress() { return _scan_in_progress.load_relaxed(); }
// Claim the next root MemRegion to scan atomically, or return null if
// all have been claimed.
@@ -411,6 +380,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
G1CMTaskQueueSet* _task_queues; // Task queue set
TaskTerminator _terminator; // For termination
+ PartialArrayStateManager* _partial_array_state_manager;
+
// Two sync barriers that are used to synchronize tasks when an
// overflow occurs. The algorithm is the following. All tasks enter
// the first one to ensure that they have all stopped manipulating
@@ -488,6 +459,8 @@ class G1ConcurrentMark : public CHeapObj {
// Prints all gathered CM-related statistics
void print_stats();
+ void print_and_reset_taskqueue_stats();
+
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
uint active_tasks() { return _num_active_tasks; }
@@ -556,14 +529,14 @@ public:
// mark_in_bitmap call. Updates various statistics data.
void add_to_liveness(uint worker_id, oop const obj, size_t size);
// Did the last marking find a live object between bottom and TAMS?
- bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
+ bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
- size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
+ size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
// Set live bytes for concurrent marking.
- void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
+ void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
// Approximate number of incoming references found during marking.
- size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
+ size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }
// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(G1HeapRegion* r);
@@ -582,6 +555,11 @@ public:
uint worker_id_offset() const { return _worker_id_offset; }
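+ // Deferred part of initialization; is_fully_initialized() reports whether it has run.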
+ void fully_initialize();
+ bool is_fully_initialized() const { return _cm_thread != nullptr; }
+ bool in_progress() const;
+ uint max_num_tasks() const { return _max_num_tasks; }
+
// Clear statistics gathered during the concurrent cycle for the given region after
// it has been reclaimed.
void clear_statistics(G1HeapRegion* r);
@@ -631,6 +609,8 @@ public:
// Calculates the number of concurrent GC threads to be used in the marking phase.
uint calc_active_marking_workers();
+ PartialArrayStateManager* partial_array_state_manager() const;
+
// Resets the global marking data structures, as well as the
// task local ones; should be called during concurrent start.
void reset();
@@ -642,6 +622,10 @@ public:
// to be called concurrently to the mutator. It will yield to safepoint requests.
void cleanup_for_next_mark();
+ // Recycle the memory that has been requested by allocators associated with
+ // the partial array state manager.
+ void reset_partial_array_state_manager();
+
// Clear the next marking bitmap during safepoint.
void clear_bitmap(WorkerThreads* workers);
@@ -732,14 +716,13 @@ private:
refs_reached_period = 1024,
};
- G1CMObjArrayProcessor _objArray_processor;
-
uint _worker_id;
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
G1CMBitMap* _mark_bitmap;
// the task queue of this task
G1CMTaskQueue* _task_queue;
+ PartialArraySplitter _partial_array_splitter;
G1RegionMarkStatsCache _mark_stats_cache;
// Number of calls to this task
@@ -850,13 +833,26 @@ private:
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(oop obj, HeapWord* global_finger) const;
- template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
+ template