Merge branch 'master' into 8368692

This commit is contained in:
Weijun Wang 2026-03-24 10:50:10 -04:00
commit dcab2aa45d
1077 changed files with 29888 additions and 18314 deletions

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -68,17 +68,19 @@ java.compiler.interim_EXTRA_FILES := \
TARGETS += $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.compiler.interim/javax/tools/ToolProvider.java
################################################################################
# Use the up-to-date PreviewFeature.java and NoPreview.java from the current
# sources, instead of the versions from the boot JDK, as javac may be referring
# to constants from the up-to-date versions.
# Create a hybrid PreviewFeature.java that combines constants
# from the current sources, as those can be used in javac APIs, and from the
# bootstrap JDK, as those can be used from bootstrap JDK classfiles.
$(eval $(call SetupCopyFiles, COPY_PREVIEW_FEATURES, \
FILES := $(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java \
$(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/NoPreview.java, \
DEST := $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/, \
))
$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java: \
$(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java
$(call LogInfo, Generating $@)
$(JAVA) $(TOPDIR)/make/langtools/tools/previewfeature/SetupPreviewFeature.java \
$(TOPDIR)/src/java.base/share/classes/jdk/internal/javac/PreviewFeature.java \
$@
TARGETS += $(COPY_PREVIEW_FEATURES)
TARGETS += $(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java
################################################################################
# Setup the rules to build interim langtools, which is compiled by the boot
@ -123,7 +125,8 @@ define SetupInterimModule
$1_DEPS_INTERIM := $$(addsuffix .interim, $$(filter \
$$(INTERIM_LANGTOOLS_BASE_MODULES), $$(call FindTransitiveDepsForModule, $1)))
$$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) $(COPY_PREVIEW_FEATURES)
$$(BUILD_$1.interim): $$(foreach d, $$($1_DEPS_INTERIM), $$(BUILD_$$d)) \
$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim/jdk/internal/javac/PreviewFeature.java
TARGETS += $$(BUILD_$1.interim)
endef

View File

@ -70,12 +70,15 @@ CLASSLIST_FILE_VM_OPTS = \
# Save the stderr output of the command and print it along with stdout in case
# something goes wrong.
# The classlists must be generated with -Xint to avoid non-determinism
# introduced by JIT compiled code
$(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXECUTABLE_SUFFIX) $(CLASSLIST_JAR)
$(call MakeDir, $(LINK_OPT_DIR))
$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $@))
$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $(JLI_TRACE_FILE)))
$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
$(CLASSLIST_FILE_VM_OPTS) \
-Xint \
-Xlog:aot=off \
-Xlog:cds=off \
-cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
@ -90,6 +93,7 @@ $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXECUTABLE_SUFFIX) $(CLASSLIST
-XX:SharedClassListFile=$@.interim -XX:SharedArchiveFile=$@.jsa \
-Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
$(CLASSLIST_FILE_VM_OPTS) \
-Xint \
--module-path $(SUPPORT_OUTPUTDIR)/classlist.jar \
-Xlog:aot=off \
-Xlog:cds=off \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -369,6 +369,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
IS_GNU_DATE=yes
else
AC_MSG_RESULT([no])
# Likely at the AIX provided version of the date utility here, which is not compatible
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
AC_MSG_ERROR([gnu date from AIX toolbox is required])
fi
IS_GNU_DATE=no
fi
AC_SUBST(IS_GNU_DATE)

View File

@ -102,6 +102,13 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
CHECKING_MSG: [if we should build headless-only (no GUI)])
AC_SUBST(ENABLE_HEADLESS_ONLY)
# Avoid headless-only on macOS and Windows, it is not supported there
if test "x$ENABLE_HEADLESS_ONLY" = xtrue; then
if test "x$OPENJDK_TARGET_OS" = xwindows || test "x$OPENJDK_TARGET_OS" = xmacosx; then
AC_MSG_ERROR([headless-only is not supported on macOS and Windows])
fi
fi
# should we linktime gc unused code sections in the JDK build ?
if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
if test "x$OPENJDK_TARGET_CPU" = "xs390x" || test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,14 @@ include ProcessMarkdown.gmk
include $(TOPDIR)/make/ToolsJdk.gmk
LAUNCHER_SRC := $(TOPDIR)/src/java.base/share/native/launcher
ifeq ($(call isTargetOs, aix), true)
ADD_PLATFORM_INCLUDE_DIR := -I$(TOPDIR)/src/java.base/aix/native/include
endif
LAUNCHER_CFLAGS += -I$(TOPDIR)/src/java.base/share/native/launcher \
-I$(TOPDIR)/src/java.base/share/native/libjli \
$(ADD_PLATFORM_INCLUDE_DIR) \
-I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjli \
-I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjli \
#

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -43,10 +43,15 @@ JVM_SRC_DIRS += $(call uniq, $(wildcard $(foreach d, $(JVM_SRC_ROOTS), \
$(JVM_VARIANT_OUTPUTDIR)/gensrc
#
ifeq ($(call isTargetOs, aix), true)
ADD_PLATFORM_INCLUDE_DIR := -I$(TOPDIR)/src/java.base/aix/native/include
endif
JVM_CFLAGS_INCLUDES += \
$(patsubst %,-I%,$(JVM_SRC_DIRS)) \
-I$(TOPDIR)/src/hotspot/share/include \
-I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE)/include \
$(ADD_PLATFORM_INCLUDE_DIR) \
-I$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
-I$(SUPPORT_OUTPUTDIR)/modules_include/java.base/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
-I$(TOPDIR)/src/java.base/share/native/libjimage \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,36 +34,6 @@ import java.util.*;
public class FieldGen {
static FieldParams Curve25519 = new FieldParams(
"IntegerPolynomial25519", 26, 10, 1, 255,
Arrays.asList(
new Term(0, -19)
),
Curve25519CrSequence(), simpleSmallCrSequence(10)
);
// Builds the carry/reduce schedule used by IntegerPolynomial25519:
// reduce limbs 17-18, carry limbs 8-9, reduce limbs 10-16, then a
// full carry pass over all 10 limbs.
private static List<CarryReduce> Curve25519CrSequence() {
    List<CarryReduce> schedule = new ArrayList<CarryReduce>();
    schedule.add(new Reduce(17));
    schedule.add(new Reduce(18));
    schedule.add(new Carry(8));
    schedule.add(new Carry(9));
    for (int limb = 10; limb <= 16; limb++) {
        schedule.add(new Reduce(limb));
    }
    schedule.addAll(fullCarry(10));
    return schedule;
}
static FieldParams Curve448 = new FieldParams(
"IntegerPolynomial448", 28, 16, 1, 448,
Arrays.asList(
@ -224,8 +194,7 @@ public class FieldGen {
}
static final FieldParams[] ALL_FIELDS = {
Curve25519, Curve448,
P256, P384, P521, O256, O384, O521, O25519, O448
Curve448, P256, P384, P521, O256, O384, O521, O25519, O448
};
public static class Term {

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package previewfeature;
import com.sun.source.util.JavacTask;
import com.sun.source.util.Trees;
import java.io.StringWriter;
import java.lang.reflect.Field;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import javax.lang.model.element.ElementKind;
import javax.tools.ToolProvider;
/* Construct a hybrid PreviewFeature.Feature enum that includes constants both
* from the current JDK sources (so that they can be used in the javac API sources),
* and from the bootstrap JDK (so that they can be used in the bootstrap classfiles).
*
* This hybrid enum is only used for the interim javac.
*/
public class SetupPreviewFeature {
// Entry point. args[0] is the path to the current PreviewFeature.java
// source; args[1] is the target path for the generated hybrid source.
public static void main(String... args) throws Exception {
// Gather, via reflection, the names of all Feature enum constants known
// to the running (bootstrap) JDK. Class.forName requires that the
// bootstrap JDK's jdk.internal.javac.PreviewFeature$Feature is visible
// to this tool's class loader.
Class<?> runtimeFeature = Class.forName("jdk.internal.javac.PreviewFeature$Feature");
Set<String> constantsToAdd = new HashSet<>();
for (Field runtimeField : runtimeFeature.getDeclaredFields()) {
if (runtimeField.isEnumConstant()) {
constantsToAdd.add(runtimeField.getName());
}
}
// Compiler diagnostics/output are discarded; only the analyzed model is used.
var dummy = new StringWriter();
var compiler = ToolProvider.getSystemJavaCompiler();
var source = Path.of(args[0]);
try (var fm = compiler.getStandardFileManager(null, null, null)) {
JavacTask task =
(JavacTask) compiler.getTask(dummy, null, null, null, null, fm.getJavaFileObjects(source));
// Run attribution so that the Feature type element and its enum
// constants become available through the Elements API.
task.analyze();
var sourceFeature = task.getElements()
.getTypeElement("jdk.internal.javac.PreviewFeature.Feature");
// insertPosition will hold the character offset of the first enum
// constant in the source file; extra constants are spliced in there.
int insertPosition = -1;
for (var el : sourceFeature.getEnclosedElements()) {
if (el.getKind() == ElementKind.ENUM_CONSTANT) {
// Constants already present in the current sources must not be
// duplicated; only bootstrap-JDK-only constants remain in the set.
constantsToAdd.remove(el.getSimpleName().toString());
if (insertPosition == (-1)) {
var trees = Trees.instance(task);
var elPath = trees.getPath(el);
insertPosition = (int) trees.getSourcePositions()
.getStartPosition(elPath.getCompilationUnit(),
elPath.getLeaf());
}
}
}
var target = Path.of(args[1]);
Files.createDirectories(target.getParent());
if (constantsToAdd.isEmpty()) {
// Nothing to merge: the current source already covers all bootstrap
// constants, so a plain copy suffices.
Files.copy(source, target);
} else {
// Splice the missing bootstrap-only constants in front of the first
// enum constant of the current source, keeping the rest unchanged.
// NOTE(review): assumes the enum's first constant starts a position
// where a "NAME1, NAME2,\n" prefix is syntactically valid — holds for
// a conventionally formatted enum body.
String sourceCode = Files.readString(source);
try (var out = Files.newBufferedWriter(target)) {
out.write(sourceCode, 0, insertPosition);
out.write(constantsToAdd.stream()
.collect(Collectors.joining(", ",
"/*compatibility constants:*/ ",
",\n")));
out.write(sourceCode, insertPosition, sourceCode.length() - insertPosition);
}
}
}
}
}

View File

@ -95,7 +95,8 @@ ifeq ($(call isTargetOsType, unix), true)
CFLAGS := $(VERSION_CFLAGS), \
EXTRA_HEADER_DIRS := libjava, \
EXTRA_OBJECT_FILES := \
$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc$(OBJ_SUFFIX), \
$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc$(OBJ_SUFFIX) \
$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjava/childproc_errorcodes$(OBJ_SUFFIX), \
LD_SET_ORIGIN := false, \
OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/$(MODULE), \
))

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -99,14 +99,16 @@ ifeq ($(call isTargetOs, windows), true)
$(TOPDIR)/src/$(MODULE)/windows/native/libawt/windows/awt.rc
endif
# This is the object file to provide the dladdr API, which is not
# part of AIX. It occurs several times in the jdk code base.
# Do not include it. When statically linking the java
# launcher with all JDK and VM static libraries, we use the
# --whole-archive linker option. The duplicate objects in different
# static libraries cause linking errors due to duplicate symbols.
ifeq ($(call isTargetOs, aix), true)
# This is the object file to provide the dladdr API, which is not
# part of AIX. It occurs several times in the jdk code base.
# Do not include it. When statically linking the java
# launcher with all JDK and VM static libraries, we use the
# --whole-archive linker option. The duplicate objects in different
# static libraries cause linking errors due to duplicate symbols.
LIBAWT_STATIC_EXCLUDE_OBJS := porting_aix.o
LIBAWT_CFLAGS += -I$(TOPDIR)/src/java.base/aix/native/include
endif
# -fgcse-after-reload improves performance of MaskFill in Java2D by 20% for
@ -423,6 +425,9 @@ endif
ifeq ($(call isTargetOs, linux)+$(ENABLE_HEADLESS_ONLY), true+true)
LIBJAWT_CFLAGS += -DHEADLESS
endif
ifeq ($(call isTargetOs, aix)+$(ENABLE_HEADLESS_ONLY), true+true)
LIBJAWT_CFLAGS += -DHEADLESS
endif
ifeq ($(call isTargetOs, windows)+$(call isTargetCpu, x86), true+true)
LIBJAWT_LIBS_windows := kernel32.lib

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ DISABLED_WARNINGS_java += dangling-doc-comments
COPY += .gif .png .txt .spec .script .prerm .preinst \
.postrm .postinst .list .sh .desktop .copyright .control .plist .template \
.icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl
.icns .scpt .wxs .wxl .wxi .wxf .ico .bmp .tiff .service .xsl .js
CLEAN += .properties

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,8 @@ ifeq ($(call isTargetOs, windows), true)
BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \
libExplicitAttach.c libImplicitAttach.c \
exelauncher.c libFDLeaker.c exeFDLeakTester.c \
libChangeSignalDisposition.c exePrintSignalDisposition.c
libChangeSignalDisposition.c exePrintSignalDisposition.c \
libConcNativeFork.c libPipesCloseOnExec.c
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib
@ -77,6 +78,9 @@ else
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerUnnamed := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLinkerInvokerModule := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libConcNativeFork := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libPipesCloseOnExec := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libLoaderLookupInvoker := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread

View File

@ -2524,10 +2524,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
return RegMask::EMPTY;
@ -8024,6 +8020,21 @@ instruct membar_release_lock() %{
ins_pipe(pipe_serial);
%}
// Matches MemBarStoreLoad nodes: orders earlier stores against later
// loads. Implemented here with a full "dmb ish" barrier (StoreLoad is
// the one ordering that requires a full barrier on this target).
instruct membar_storeload() %{
match(MemBarStoreLoad);
ins_cost(VOLATILE_REF_COST*100);
format %{ "MEMBAR-store-load\n\t"
"dmb ish" %}
ins_encode %{
__ block_comment("membar_storeload");
__ membar(Assembler::StoreLoad);
%}
ins_pipe(pipe_serial);
%}
instruct unnecessary_membar_volatile() %{
predicate(unnecessary_volatile(n));
match(MemBarVolatile);
@ -8053,6 +8064,20 @@ instruct membar_volatile() %{
ins_pipe(pipe_serial);
%}
// Matches MemBarFull nodes: a full two-way barrier (Assembler::AnyAny),
// emitted as "dmb ish". Serialized in the pipeline like the other
// membar instructs.
instruct membar_full() %{
match(MemBarFull);
ins_cost(VOLATILE_REF_COST*100);
format %{ "membar_full\n\t"
"dmb ish" %}
ins_encode %{
__ block_comment("membar_full");
__ membar(Assembler::AnyAny);
%}
ins_pipe(pipe_serial);
%}
// ============================================================================
// Cast/Convert Instructions

View File

@ -51,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(bool, CICompileOSR, true );
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

View File

@ -74,9 +74,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed.
#endif // CPU_AARCH64_C2_GLOBALS_AARCH64_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -146,10 +146,10 @@ void DowncallLinker::StubGenerator::generate() {
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = -1;
int out_spill_offset = -1;
if (should_save_return_value) {
spill_offset = 0;
out_spill_offset = 0;
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
@ -174,6 +174,9 @@ void DowncallLinker::StubGenerator::generate() {
// FP-> | |
// |---------------------| = frame_bottom_offset = frame_size
// | (optional) |
// | in_reg_spiller area |
// |---------------------|
// | (optional) |
// | capture state buf |
// |---------------------| = StubLocations::CAPTURED_STATE_BUFFER
// | (optional) |
@ -187,6 +190,19 @@ void DowncallLinker::StubGenerator::generate() {
GrowableArray<VMStorage> out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs);
ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg);
// Need to spill for state capturing runtime call.
// The area spilled into is distinct from the capture state buffer.
RegSpiller in_reg_spiller(out_regs);
int in_spill_offset = -1;
if (_captured_state_mask != 0) {
// The spill area cannot be shared with the out_spill since
// spilling needs to happen before the call. Allocate a new
// region in the stack for this spill space.
in_spill_offset = allocated_frame_size;
allocated_frame_size += in_reg_spiller.spill_size_bytes();
}
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
@ -228,6 +244,20 @@ void DowncallLinker::StubGenerator::generate() {
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes);
__ block_comment("} argument shuffle");
if (_captured_state_mask != 0) {
assert(in_spill_offset != -1, "must be");
__ block_comment("{ load initial thread local");
in_reg_spiller.generate_spill(_masm, in_spill_offset);
// Copy the contents of the capture state buffer into thread local
__ ldr(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movw(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre), tmp1);
in_reg_spiller.generate_fill(_masm, in_spill_offset);
__ block_comment("} load initial thread local");
}
__ blr(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
// this call is assumed not to have killed rthread
@ -254,15 +284,15 @@ void DowncallLinker::StubGenerator::generate() {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ ldr(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movw(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state), tmp1);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post), tmp1);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ block_comment("} save thread local");
@ -321,7 +351,7 @@ void DowncallLinker::StubGenerator::generate() {
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ mov(c_rarg0, rthread);
@ -330,7 +360,7 @@ void DowncallLinker::StubGenerator::generate() {
__ blr(tmp1);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ b(L_after_safepoint_poll);
@ -342,13 +372,13 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_reguard);
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), tmp1);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ b(L_after_reguard);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -245,8 +245,8 @@ inline bool frame::equal(frame other) const {
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
// frame. Should not be called for heap frames.
inline intptr_t* frame::id(void) const { return real_fp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
@ -412,6 +412,9 @@ inline frame frame::sender(RegisterMap* map) const {
StackWatermarkSet::on_iteration(map->thread(), result);
}
// Calling frame::id() is currently not supported for heap frames.
assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be");
return result;
}

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1112,10 +1112,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 30 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
@ -4440,6 +4436,18 @@ instruct membar_release_lock() %{
ins_pipe(empty);
%}
// Matches MemBarStoreLoad nodes: orders earlier stores against later
// loads via MacroAssembler::StoreLoad (no scratch register needed).
instruct membar_storeload() %{
match(MemBarStoreLoad);
ins_cost(4*MEMORY_REF_COST);
size(4);
format %{ "MEMBAR-storeload" %}
ins_encode %{
__ membar(MacroAssembler::StoreLoad, noreg);
%}
ins_pipe(long_memory_op);
%}
instruct membar_volatile() %{
match(MemBarVolatile);
ins_cost(4*MEMORY_REF_COST);
@ -4463,6 +4471,18 @@ instruct unnecessary_membar_volatile() %{
ins_pipe(empty);
%}
// Matches MemBarFull nodes. Encoded the same as membar_storeload on
// this target: a StoreLoad barrier is the strongest fence, so it also
// serves as the full two-way barrier.
instruct membar_full() %{
match(MemBarFull);
ins_cost(4*MEMORY_REF_COST);
size(4);
format %{ "MEMBAR-full" %}
ins_encode %{
__ membar(MacroAssembler::StoreLoad, noreg);
%}
ins_pipe(long_memory_op);
%}
//----------Register Move Instructions-----------------------------------------
// Cast Index to Pointer for unsafe natives

View File

@ -52,7 +52,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(bool, CICompileOSR, true );
#endif // COMPILER2
define_pd_global(bool, UseTypeProfile, false);

View File

@ -93,7 +93,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_ARM_C2_GLOBALS_ARM_HPP

View File

@ -1580,10 +1580,6 @@ class Assembler : public AbstractAssembler {
static bool is_nop(int x) {
return x == 0x60000000;
}
// endgroup opcode for Power6
static bool is_endgroup(int x) {
return is_ori(x) && inv_ra_field(x) == 1 && inv_rs_field(x) == 1 && inv_d1_field(x) == 0;
}
private:
@ -1659,9 +1655,6 @@ class Assembler : public AbstractAssembler {
inline void ori_opt( Register d, int ui16);
inline void oris_opt(Register d, int ui16);
// endgroup opcode for Power6
inline void endgroup();
// count instructions
inline void cntlzw( Register a, Register s);
inline void cntlzw_( Register a, Register s);

View File

@ -253,8 +253,6 @@ inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s,
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
inline void Assembler::oris_opt(Register d, int ui16) { if (ui16!=0) Assembler::oris(d, d, ui16); }
inline void Assembler::endgroup() { Assembler::ori(R1, R1, 0); }
// count instructions
inline void Assembler::cntlzw( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzw_( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }

View File

@ -51,7 +51,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
#endif // !COMPILER2

View File

@ -90,7 +90,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, true);
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_PPC_C2_GLOBALS_PPC_HPP

View File

@ -119,9 +119,6 @@ address Disassembler::decode_instruction0(address here, outputStream * st, addre
} else if (instruction == 0xbadbabe) {
st->print(".data 0xbadbabe");
next = here + Assembler::instr_len(here);
} else if (Assembler::is_endgroup(instruction)) {
st->print("endgroup");
next = here + Assembler::instr_len(here);
} else {
next = here;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025 SAP SE. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -135,10 +135,10 @@ void DowncallLinker::StubGenerator::generate() {
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = -1;
int out_spill_offset = -1;
if (should_save_return_value) {
spill_offset = frame::native_abi_reg_args_size;
out_spill_offset = frame::native_abi_reg_args_size;
// Spill area can be shared with additional out args (>8),
// since it is only used after the call.
int frame_size_including_spill_area = frame::native_abi_reg_args_size + out_reg_spiller.spill_size_bytes();
@ -170,6 +170,18 @@ void DowncallLinker::StubGenerator::generate() {
ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, _abi._scratch1);
// Need to spill for state capturing runtime call.
// The area spilled into is distinct from the capture state buffer.
RegSpiller in_reg_spiller(out_regs);
int in_spill_offset = -1;
if (_captured_state_mask != 0) {
// The spill area cannot be shared with the out_spill since
// spilling needs to happen before the call. Allocate a new
// region in the stack for this spill space.
in_spill_offset = allocated_frame_size;
allocated_frame_size += in_reg_spiller.spill_size_bytes();
}
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
@ -211,6 +223,21 @@ void DowncallLinker::StubGenerator::generate() {
arg_shuffle.generate(_masm, as_VMStorage(callerSP), frame::jit_out_preserve_size, frame::native_abi_minframe_size);
__ block_comment("} argument shuffle");
if (_captured_state_mask != 0) {
assert(in_spill_offset != -1, "must be");
__ block_comment("{ load initial thread local");
in_reg_spiller.generate_spill(_masm, in_spill_offset);
// Copy the contents of the capture state buffer into thread local
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_pre), R0);
__ ld(R3_ARG1, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER), R1_SP);
__ load_const_optimized(R4_ARG2, _captured_state_mask, R0);
__ call_c(call_target_address);
in_reg_spiller.generate_fill(_masm, in_spill_offset);
__ block_comment("} load initial thread local");
}
__ call_c(call_target_address);
if (_needs_return_buffer) {
@ -247,16 +274,16 @@ void DowncallLinker::StubGenerator::generate() {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state), R0);
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_post), R0);
__ ld(R3_ARG1, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER), R1_SP);
__ load_const_optimized(R4_ARG2, _captured_state_mask, R0);
__ call_c(call_target_address);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ block_comment("} save thread local");
@ -310,7 +337,7 @@ void DowncallLinker::StubGenerator::generate() {
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans), R0);
@ -318,7 +345,7 @@ void DowncallLinker::StubGenerator::generate() {
__ call_c(call_target_address);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ b(L_after_safepoint_poll);
@ -330,14 +357,14 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_reguard);
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages), R0);
__ call_c(call_target_address);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ b(L_after_reguard);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -137,10 +137,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
// Return unique id for this frame. The id must have a value where we
// can distinguish identity and younger/older relationship. null
// represents an invalid (incomparable) frame.
// represents an invalid (incomparable) frame. Should not be called for heap frames.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
return _fp;
return real_fp();
}
// Return true if this frame is older (less recent activation) than
@ -319,6 +319,9 @@ inline frame frame::sender(RegisterMap* map) const {
StackWatermarkSet::on_iteration(map->thread(), result);
}
// Calling frame::id() is currently not supported for heap frames.
assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be");
return result;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
* Copyright (c) 2018, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -179,6 +179,11 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re
__ ld(dst, 0, dst); // Resolve (untagged) jobject.
}
void BarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ ld(obj, 0, obj);
}
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register tmp) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
assert_different_registers(tmp, R0);
@ -275,11 +280,6 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ ld(obj, 0, obj);
}
#undef __
#define __ _masm->

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* Copyright (c) 2018, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,6 +70,12 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
// Can be used in nmethods including native wrappers.
// Attention: obj will only be valid until next safepoint (no SATB barrier).
// TODO: maybe rename to try_peek_weak_handle on all platforms (try: operation may fail, peek: obj is not kept alive)
// (other platforms currently use it for C2 only: try_resolve_weak_handle_in_c2)
virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
virtual void barrier_stubs_init() {}
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; }
@ -81,8 +87,6 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path);
#endif // COMPILER2
};

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -663,17 +663,16 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
}
#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj,
Register tmp, Label &slow_path) {
__ block_comment("try_resolve_weak_handle_in_c2 (shenandoahgc) {");
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle(MacroAssembler *masm, Register obj,
Register tmp, Label &slow_path) {
__ block_comment("try_resolve_weak_handle (shenandoahgc) {");
assert_different_registers(obj, tmp);
Label done;
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
BarrierSetAssembler::try_resolve_weak_handle(masm, obj, tmp, slow_path);
// Check if the reference is null, and if it is, take the fast path.
__ cmpdi(CR0, obj, 0);
@ -686,9 +685,8 @@ void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler
__ bne(CR0, slow_path);
__ bind(done);
__ block_comment("} try_resolve_weak_handle_in_c2 (shenandoahgc)");
__ block_comment("} try_resolve_weak_handle (shenandoahgc)");
}
#endif
// Special shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. That is, the CAS operation is intended to succeed in

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2022 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,9 +122,8 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
#ifdef COMPILER2
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif
virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
};
#endif // CPU_PPC_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_PPC_HPP

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025 SAP SE. All rights reserved.
* Copyright (c) 2021, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -627,6 +627,19 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
__ block_comment("} try_resolve_jobject_in_native (zgc)");
}
void ZBarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle(masm, obj, tmp, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadMask);
__ andi_(R0, obj, barrier_Relocation::unpatched);
__ bne(CR0, slow_path);
// Oop is okay, so we uncolor it.
__ srdi(obj, obj, ZPointerLoadShift);
}
#undef __
#ifdef COMPILER1
@ -950,19 +963,6 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ b(*stub->continuation());
}
void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadMask);
__ andi_(R0, obj, barrier_Relocation::unpatched);
__ bne(CR0, slow_path);
// Oop is okay, so we uncolor it.
__ srdi(obj, obj, ZPointerLoadShift);
}
#undef __
#endif // COMPILER2

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* Copyright (c) 2021, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,6 +72,8 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
virtual void check_oop(MacroAssembler *masm, Register obj, const char* msg);
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; }
@ -108,8 +110,6 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const;
void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
void store_barrier_fast(MacroAssembler* masm,

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -483,7 +483,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
// variant 3, far cond branch to the next instruction, already patched to nops:
//
// nop
// endgroup
// nop
// SKIP/DEST:
//
return;
@ -500,7 +500,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
// Far branch to next instruction: Optimize it by patching nops (produce variant 3).
masm.nop();
masm.endgroup();
masm.nop();
} else {
if (is_bc_far_variant1_at(instruction_addr)) {
// variant 1, the 1st instruction contains the destination address:
@ -2800,7 +2800,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Check if object matches.
ld(tmp3, in_bytes(ObjectMonitor::object_offset()), monitor);
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path);
bs_asm->try_resolve_weak_handle(this, tmp3, tmp2, slow_path);
cmpd(CR0, tmp3, obj);
bne(CR0, slow_path);

View File

@ -70,14 +70,6 @@ class MacroAssembler: public Assembler {
// Move register if destination register and target register are different
inline void mr_if_needed(Register rd, Register rs, bool allow_invalid = false);
inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
// This is dedicated for emitting scheduled mach nodes. For better
// readability of the ad file I put it here.
// Endgroups are not needed if
// - the scheduler is off
// - the scheduler found that there is a natural group end, in that
// case it reduced the size of the instruction used in the test
// yielding 'needed'.
inline void endgroup_if_needed(bool needed);
// Memory barriers.
inline void membar(int bits);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,11 +72,6 @@ inline void MacroAssembler::mr_if_needed(Register rd, Register rs, bool allow_no
inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
if (rs != rd) fmr(rd, rs);
}
inline void MacroAssembler::endgroup_if_needed(bool needed) {
if (needed) {
endgroup();
}
}
inline void MacroAssembler::membar(int bits) {
// Comment: Usage of elemental_membar(bits) is not recommended for Power 8.
@ -240,13 +235,13 @@ inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
// Variant 3, far cond branch to the next instruction, already patched to nops:
//
// nop
// endgroup
// nop
// SKIP/DEST:
//
const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4);
return is_nop(instruction_1) &&
is_endgroup(instruction_2);
is_nop(instruction_2);
}
// set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,

View File

@ -2457,10 +2457,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 28 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
// Register for DIVI projection of divmodI.
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
@ -6327,36 +6323,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@ -6369,7 +6337,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
@ -7163,6 +7130,18 @@ instruct membar_release_lock() %{
ins_pipe(pipe_class_default);
%}
instruct membar_storeload() %{
match(MemBarStoreLoad);
ins_cost(4*MEMORY_REF_COST);
format %{ "MEMBAR-store-load" %}
size(4);
ins_encode %{
__ fence();
%}
ins_pipe(pipe_class_default);
%}
instruct membar_volatile() %{
match(MemBarVolatile);
ins_cost(4*MEMORY_REF_COST);
@ -7205,6 +7184,18 @@ instruct membar_volatile() %{
// ins_pipe(pipe_class_default);
//%}
instruct membar_full() %{
match(MemBarFull);
ins_cost(4*MEMORY_REF_COST);
format %{ "MEMBAR-full" %}
size(4);
ins_encode %{
__ fence();
%}
ins_pipe(pipe_class_default);
%}
instruct membar_CPUOrder() %{
match(MemBarCPUOrder);
ins_cost(0);
@ -10312,7 +10303,7 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
format %{ "CMOVI $crx, $dst, $src" %}
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10325,7 +10316,7 @@ instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
format %{ "CMOVI $crx, $dst, $src" %}
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10337,7 +10328,7 @@ instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{
effect(DEF dst, USE crx, USE src);
predicate(false);
format %{ "CmovI $dst, $crx, $src \t// postalloc expanded" %}
format %{ "CMOVI $dst, $crx, $src \t// postalloc expanded" %}
postalloc_expand %{
//
// replaces
@ -10487,7 +10478,7 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
format %{ "CMOVL $crx, $dst, $src" %}
size(8);
ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10500,7 +10491,7 @@ instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
format %{ "CMOVL $crx, $dst, $src" %}
size(8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
@ -10512,7 +10503,7 @@ instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{
effect(DEF dst, USE crx, USE src);
predicate(false);
format %{ "CmovL $dst, $crx, $src \t// postalloc expanded" %}
format %{ "CMOVL $dst, $crx, $src \t// postalloc expanded" %}
postalloc_expand %{
//
// replaces
@ -10713,9 +10704,9 @@ instruct convF2HF_reg_reg(iRegIdst dst, regF src, regF tmp) %{
effect(TEMP tmp);
ins_cost(3 * DEFAULT_COST);
size(12);
format %{ "xscvdphp $tmp, $src\t# convert to half precision\n\t"
"mffprd $dst, $tmp\t# move result from $tmp to $dst\n\t"
"extsh $dst, $dst\t# make it a proper short"
format %{ "XSCVDPHP $tmp, $src\t# convert to half precision\n\t"
"MFFPRD $dst, $tmp\t# move result from $tmp to $dst\n\t"
"EXTSH $dst, $dst\t# make it a proper short"
%}
ins_encode %{
__ f2hf($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
@ -10727,8 +10718,8 @@ instruct convHF2F_reg_reg(regF dst, iRegIsrc src) %{
match(Set dst (ConvHF2F src));
ins_cost(2 * DEFAULT_COST);
size(8);
format %{ "mtfprd $dst, $src\t# move source from $src to $dst\n\t"
"xscvhpdp $dst, $dst\t# convert from half precision"
format %{ "MTFPRD $dst, $src\t# move source from $src to $dst\n\t"
"XSCVHPDP $dst, $dst\t# convert from half precision"
%}
ins_encode %{
__ hf2f($dst$$FloatRegister, $src$$Register);
@ -11126,7 +11117,7 @@ instruct cmov_bns_less(flagsReg crx) %{
ins_variable_size_depending_on_alignment(true);
format %{ "cmov $crx" %}
format %{ "CMOV $crx" %}
size(12);
ins_encode %{
Label done;
@ -11154,7 +11145,7 @@ instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
match(Set crx (CmpF src1 src2));
ins_cost(DEFAULT_COST+BRANCH_COST);
format %{ "CmpF $crx, $src1, $src2 \t// postalloc expanded" %}
format %{ "CMPF $crx, $src1, $src2 \t// postalloc expanded" %}
postalloc_expand %{
//
// replaces
@ -12312,7 +12303,7 @@ instruct minF(regF dst, regF src1, regF src2) %{
predicate(PowerArchitecturePPC64 >= 9);
ins_cost(DEFAULT_COST);
format %{ "MinF $dst, $src1, $src2" %}
format %{ "XSMINJDP $dst, $src1, $src2\t// MinF" %}
size(4);
ins_encode %{
__ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
@ -12325,7 +12316,7 @@ instruct minD(regD dst, regD src1, regD src2) %{
predicate(PowerArchitecturePPC64 >= 9);
ins_cost(DEFAULT_COST);
format %{ "MinD $dst, $src1, $src2" %}
format %{ "XSMINJDP $dst, $src1, $src2\t// MinD" %}
size(4);
ins_encode %{
__ xsminjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
@ -12338,7 +12329,7 @@ instruct maxF(regF dst, regF src1, regF src2) %{
predicate(PowerArchitecturePPC64 >= 9);
ins_cost(DEFAULT_COST);
format %{ "MaxF $dst, $src1, $src2" %}
format %{ "XSMAXJDP $dst, $src1, $src2\t// MaxF" %}
size(4);
ins_encode %{
__ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
@ -12351,7 +12342,7 @@ instruct maxD(regD dst, regD src1, regD src2) %{
predicate(PowerArchitecturePPC64 >= 9);
ins_cost(DEFAULT_COST);
format %{ "MaxD $dst, $src1, $src2" %}
format %{ "XSMAXJDP $dst, $src1, $src2\t// MaxD" %}
size(4);
ins_encode %{
__ xsmaxjdp($dst$$FloatRegister->to_vsr(), $src1$$FloatRegister->to_vsr(), $src2$$FloatRegister->to_vsr());
@ -13881,7 +13872,7 @@ instruct vfma2D_neg2(vecX dst, vecX src1, vecX src2) %{
instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
match(Set cr0 (OverflowAddL op1 op2));
format %{ "add_ $op1, $op2\t# overflow check long" %}
format %{ "ADD_ $op1, $op2\t# overflow check long" %}
size(12);
ins_encode %{
__ li(R0, 0);
@ -13894,7 +13885,7 @@ instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
match(Set cr0 (OverflowSubL op1 op2));
format %{ "subfo_ R0, $op2, $op1\t# overflow check long" %}
format %{ "SUBFO_ R0, $op2, $op1\t# overflow check long" %}
size(12);
ins_encode %{
__ li(R0, 0);
@ -13907,7 +13898,7 @@ instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{
match(Set cr0 (OverflowSubL zero op2));
format %{ "nego_ R0, $op2\t# overflow check long" %}
format %{ "NEGO_ R0, $op2\t# overflow check long" %}
size(12);
ins_encode %{
__ li(R0, 0);
@ -13920,7 +13911,7 @@ instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{
instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
match(Set cr0 (OverflowMulL op1 op2));
format %{ "mulldo_ R0, $op1, $op2\t# overflow check long" %}
format %{ "MULLDO_ R0, $op1, $op2\t# overflow check long" %}
size(12);
ins_encode %{
__ li(R0, 0);
@ -14297,7 +14288,7 @@ instruct ForwardExceptionjmp()
match(ForwardException);
ins_cost(CALL_COST);
format %{ "Jmp forward_exception_stub" %}
format %{ "JMP forward_exception_stub" %}
ins_encode %{
__ set_inst_mark();
__ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
@ -14325,7 +14316,7 @@ instruct RethrowException() %{
match(Rethrow);
ins_cost(CALL_COST);
format %{ "Jmp rethrow_stub" %}
format %{ "JMP rethrow_stub" %}
ins_encode %{
__ set_inst_mark();
__ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
@ -14367,20 +14358,6 @@ instruct tlsLoadP(threadRegP dst) %{
//---Some PPC specific nodes---------------------------------------------------
// Stop a group.
instruct endGroup() %{
ins_cost(0);
ins_is_nop(true);
format %{ "End Bundle (ori r1, r1, 0)" %}
size(4);
ins_encode %{
__ endgroup();
%}
ins_pipe(pipe_class_default);
%}
// Nop instructions
instruct fxNop() %{

View File

@ -51,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(bool, CICompileOSR, true );
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

View File

@ -74,9 +74,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed.
#endif // CPU_RISCV_C2_GLOBALS_RISCV_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -140,10 +140,10 @@ void DowncallLinker::StubGenerator::generate() {
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = -1;
int out_spill_offset = -1;
if (should_save_return_value) {
spill_offset = 0;
out_spill_offset = 0;
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
@ -168,6 +168,9 @@ void DowncallLinker::StubGenerator::generate() {
// FP-> | |
// |---------------------| = frame_bottom_offset = frame_size
// | (optional) |
// | in_reg_spiller area |
// |---------------------|
// | (optional) |
// | capture state buf |
// |---------------------| = StubLocations::CAPTURED_STATE_BUFFER
// | (optional) |
@ -181,6 +184,18 @@ void DowncallLinker::StubGenerator::generate() {
GrowableArray<VMStorage> out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs);
ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg);
// Need to spill for state capturing runtime call.
// The area spilled into is distinct from the capture state buffer.
RegSpiller in_reg_spiller(out_regs);
int in_spill_offset = -1;
if (_captured_state_mask != 0) {
// The spill area cannot be shared with the out_spill since
// spilling needs to happen before the call. Allocate a new
// region in the stack for this spill space.
in_spill_offset = allocated_frame_size;
allocated_frame_size += in_reg_spiller.spill_size_bytes();
}
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
@ -226,6 +241,20 @@ void DowncallLinker::StubGenerator::generate() {
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes);
__ block_comment("} argument shuffle");
if (_captured_state_mask != 0) {
assert(in_spill_offset != -1, "must be");
__ block_comment("{ load initial thread local");
in_reg_spiller.generate_spill(_masm, in_spill_offset);
// Copy the contents of the capture state buffer into thread local
__ ld(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ mv(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre));
in_reg_spiller.generate_fill(_masm, in_spill_offset);
__ block_comment("} load initial thread local");
}
__ jalr(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
// this call is assumed not to have killed xthread
@ -254,15 +283,15 @@ void DowncallLinker::StubGenerator::generate() {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ ld(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ mv(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state));
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ block_comment("} save thread local");
@ -319,7 +348,7 @@ void DowncallLinker::StubGenerator::generate() {
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ mv(c_rarg0, xthread);
@ -327,7 +356,7 @@ void DowncallLinker::StubGenerator::generate() {
__ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ j(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@ -339,13 +368,13 @@ void DowncallLinker::StubGenerator::generate() {
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
}
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
}
__ j(L_after_reguard);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -236,8 +236,8 @@ inline bool frame::equal(frame other) const {
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
// frame. Should not be called for heap frames.
inline intptr_t* frame::id(void) const { return real_fp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
@ -398,6 +398,9 @@ frame frame::sender(RegisterMap* map) const {
StackWatermarkSet::on_iteration(map->thread(), result);
}
// Calling frame::id() is currently not supported for heap frames.
assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be");
return result;
}

View File

@ -2111,10 +2111,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
const RegMask& Matcher::divI_proj_mask() {
ShouldNotReachHere();
return RegMask::EMPTY;
@ -8156,6 +8152,22 @@ instruct unnecessary_membar_rvtso() %{
ins_pipe(real_empty);
%}
instruct membar_storeload_rvtso() %{
predicate(UseZtso);
match(MemBarStoreLoad);
ins_cost(VOLATILE_REF_COST);
format %{ "#@membar_storeload_rvtso\n\t"
"fence w, r"%}
ins_encode %{
__ block_comment("membar_storeload_rvtso");
__ membar(MacroAssembler::StoreLoad);
%}
ins_pipe(pipe_slow);
%}
instruct membar_volatile_rvtso() %{
predicate(UseZtso);
match(MemBarVolatile);
@ -8186,6 +8198,22 @@ instruct unnecessary_membar_volatile_rvtso() %{
ins_pipe(real_empty);
%}
instruct membar_full_rvtso() %{
predicate(UseZtso);
match(MemBarFull);
ins_cost(VOLATILE_REF_COST);
format %{ "#@membar_full_rvtso\n\t"
"fence rw, rw" %}
ins_encode %{
__ block_comment("membar_full_rvtso");
__ membar(MacroAssembler::AnyAny);
%}
ins_pipe(pipe_slow);
%}
// RVWMO
instruct membar_aqcuire_rvwmo() %{
@ -8235,6 +8263,22 @@ instruct membar_storestore_rvwmo() %{
ins_pipe(pipe_serial);
%}
instruct membar_storeload_rvwmo() %{
predicate(!UseZtso);
match(MemBarStoreLoad);
ins_cost(VOLATILE_REF_COST);
format %{ "#@membar_storeload_rvwmo\n\t"
"fence w, r"%}
ins_encode %{
__ block_comment("membar_storeload_rvwmo");
__ membar(MacroAssembler::StoreLoad);
%}
ins_pipe(pipe_serial);
%}
instruct membar_volatile_rvwmo() %{
predicate(!UseZtso);
match(MemBarVolatile);
@ -8279,6 +8323,22 @@ instruct unnecessary_membar_volatile_rvwmo() %{
ins_pipe(real_empty);
%}
instruct membar_full_rvwmo() %{
predicate(!UseZtso);
match(MemBarFull);
ins_cost(VOLATILE_REF_COST);
format %{ "#@membar_full_rvwmo\n\t"
"fence rw, rw" %}
ins_encode %{
__ block_comment("membar_full_rvwmo");
__ membar(MacroAssembler::AnyAny);
%}
ins_pipe(pipe_serial);
%}
instruct spin_wait() %{
predicate(UseZihintpause);
match(OnSpinWait);

View File

@ -55,7 +55,7 @@ class VM_Version : public Abstract_VM_Version {
public:
RVFeatureValue(const char* pretty, int linux_bit_num, bool fstring) :
_pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)) {
_pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit<uint64_t>(linux_bit_num)) {
}
virtual void enable_feature(int64_t value = 0) = 0;
virtual void disable_feature() = 0;

View File

@ -51,7 +51,6 @@ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
#endif // !COMPILER2

View File

@ -78,7 +78,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture.
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_S390_C2_GLOBALS_S390_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -129,7 +129,7 @@ void DowncallLinker::StubGenerator::generate() {
assert(!_needs_return_buffer, "unexpected needs_return_buffer");
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = allocated_frame_size;
int out_spill_offset = allocated_frame_size;
allocated_frame_size += BytesPerWord;
StubLocations locs;
@ -153,6 +153,18 @@ void DowncallLinker::StubGenerator::generate() {
GrowableArray<VMStorage> out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs);
ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, _abi._scratch1);
// Need to spill for state capturing runtime call.
// The area spilled into is distinct from the capture state buffer.
RegSpiller in_reg_spiller(out_regs);
int in_spill_offset = -1;
if (_captured_state_mask != 0) {
// The spill area cannot be shared with the out_spill since
// spilling needs to happen before the call. Allocate a new
// region in the stack for this spill space.
in_spill_offset = allocated_frame_size;
allocated_frame_size += in_reg_spiller.spill_size_bytes();
}
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
@ -192,6 +204,21 @@ void DowncallLinker::StubGenerator::generate() {
arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes);
__ block_comment("} argument_shuffle");
if (_captured_state_mask != 0) {
assert(in_spill_offset != -1, "must be");
__ block_comment("{ load initial thread local");
in_reg_spiller.generate_spill(_masm, in_spill_offset);
// Copy the contents of the capture state buffer into thread local
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_pre));
__ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ load_const_optimized(Z_ARG2, _captured_state_mask);
__ call(call_target_address);
in_reg_spiller.generate_fill(_masm, in_spill_offset);
__ block_comment("} load initial thread local");
}
__ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
//////////////////////////////////////////////////////////////////////////////
@ -199,14 +226,14 @@ void DowncallLinker::StubGenerator::generate() {
if (_captured_state_mask != 0) {
__ block_comment("save_thread_local {");
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state));
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state_post));
__ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ load_const_optimized(Z_ARG2, _captured_state_mask);
__ call(call_target_address);
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
__ block_comment("} save_thread_local");
}
@ -259,13 +286,13 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans));
__ z_lgr(Z_ARG1, Z_thread);
__ call(call_target_address);
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
__ z_bru(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@ -275,12 +302,12 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_reguard);
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
out_reg_spiller.generate_spill(_masm, out_spill_offset);
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages));
__ call(call_target_address);
out_reg_spiller.generate_fill(_masm, spill_offset);
out_reg_spiller.generate_fill(_masm, out_spill_offset);
__ z_bru(L_after_reguard);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -463,7 +463,7 @@
// Accessors
inline intptr_t* fp() const { return _fp; }
inline intptr_t* fp() const { assert_absolute(); return _fp; }
private:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -133,10 +133,10 @@ inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) {
// Return unique id for this frame. The id must have a value where we
// can distinguish identity and younger/older relationship. null
// represents an invalid (incomparable) frame.
// represents an invalid (incomparable) frame. Should not be called for heap frames.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
return _fp;
return real_fp();
}
// Return true if this frame is older (less recent activation) than

View File

@ -169,6 +169,11 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re
__ z_lg(obj, 0, obj); // Resolve (untagged) jobject.
}
void BarrierSetAssembler::try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ z_lg(obj, Address(obj));
}
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
__ align(4, __ offset() + OFFSET_TO_PATCHABLE_DATA); // must align the following block which requires atomic updates
@ -206,11 +211,6 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ z_lg(obj, Address(obj));
}
#undef __
#define __ _masm->

View File

@ -58,6 +58,11 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
// Can be used in nmethods including native wrappers.
// Attention: obj will only be valid until next safepoint (no SATB barrier).
// (other platforms currently use it for C2 only: try_resolve_weak_handle_in_c2)
virtual void try_resolve_weak_handle(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
virtual void nmethod_entry_barrier(MacroAssembler* masm);
virtual void barrier_stubs_init() {}
@ -65,8 +70,6 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg) const;
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path);
#endif // COMPILER2
static const int OFFSET_TO_PATCHABLE_DATA_INSTRUCTION = 6 + 6 + 6; // iihf(6) + iilf(6) + lg(6)

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* Copyright 2024 IBM Corporation. All rights reserved.
* Copyright 2024, 2026 IBM Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -6413,7 +6413,7 @@ void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Regis
// Check if object matches.
z_lg(tmp2, Address(tmp1_monitor, ObjectMonitor::object_offset()));
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, tmp2, Z_R0_scratch, slow_path);
bs_asm->try_resolve_weak_handle(this, tmp2, Z_R0_scratch, slow_path);
z_cgr(obj, tmp2);
z_brne(slow_path);

View File

@ -1929,10 +1929,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? 15 : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
return _Z_RARG4_INT_REG_mask;
@ -5239,6 +5235,15 @@ instruct membar_release_lock() %{
ins_pipe(pipe_class_dummy);
%}
instruct membar_storeload() %{
match(MemBarStoreLoad);
ins_cost(4 * MEMORY_REF_COST);
size(2);
format %{ "MEMBAR-storeload" %}
ins_encode %{ __ z_fence(); %}
ins_pipe(pipe_class_dummy);
%}
instruct membar_volatile() %{
match(MemBarVolatile);
ins_cost(4 * MEMORY_REF_COST);
@ -5258,6 +5263,15 @@ instruct unnecessary_membar_volatile() %{
ins_pipe(pipe_class_dummy);
%}
instruct membar_full() %{
match(MemBarFull);
ins_cost(4 * MEMORY_REF_COST);
size(2);
format %{ "MEMBAR-full" %}
ins_encode %{ __ z_fence(); %}
ins_pipe(pipe_class_dummy);
%}
instruct membar_CPUOrder() %{
match(MemBarCPUOrder);
ins_cost(0);

View File

@ -50,7 +50,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(size_t, CodeCacheMinBlockLength, 1 );
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(bool, CICompileOSR, true );
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

View File

@ -73,7 +73,4 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86.
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_X86_C2_GLOBALS_X86_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -145,10 +145,10 @@ void DowncallLinker::StubGenerator::generate() {
// when we don't use a return buffer we need to spill the return value around our slow path calls
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_rsp_offset = -1;
int out_spill_rsp_offset = -1;
if (should_save_return_value) {
spill_rsp_offset = 0;
out_spill_rsp_offset = 0;
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
@ -173,6 +173,9 @@ void DowncallLinker::StubGenerator::generate() {
// FP-> | |
// |---------------------| = frame_bottom_offset = frame_size
// | (optional) |
// | in_reg_spiller area |
// |---------------------|
// | (optional) |
// | capture state buf |
// |---------------------| = StubLocations::CAPTURED_STATE_BUFFER
// | (optional) |
@ -188,6 +191,18 @@ void DowncallLinker::StubGenerator::generate() {
VMStorage shuffle_reg = as_VMStorage(rbx);
ArgumentShuffle arg_shuffle(filtered_java_regs, out_regs, shuffle_reg);
// Need to spill for state capturing runtime call.
// The area spilled into is distinct from the capture state buffer.
RegSpiller in_reg_spiller(out_regs);
int in_spill_rsp_offset = -1;
if (_captured_state_mask != 0) {
// The spill area cannot be shared with the shadow/out args space
// since spilling needs to happen before the call. Allocate a new
// region in the stack for this spill space.
in_spill_rsp_offset = allocated_frame_size;
allocated_frame_size += in_reg_spiller.spill_size_bytes();
}
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
@ -232,6 +247,19 @@ void DowncallLinker::StubGenerator::generate() {
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes);
__ block_comment("} argument shuffle");
if (_captured_state_mask != 0) {
assert(in_spill_rsp_offset != -1, "must be");
__ block_comment("{ load initial thread local");
in_reg_spiller.generate_spill(_masm, in_spill_rsp_offset);
// Copy the contents of the capture state buffer into thread local
__ movptr(c_rarg0, Address(rsp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movl(c_rarg1, _captured_state_mask);
runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_pre));
in_reg_spiller.generate_fill(_masm, in_spill_rsp_offset);
__ block_comment("} load initial thread local");
}
__ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
assert(!_abi.is_volatile_reg(r15_thread), "Call assumed not to kill r15");
@ -258,15 +286,15 @@ void DowncallLinker::StubGenerator::generate() {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset);
}
__ movptr(c_rarg0, Address(rsp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movl(c_rarg1, _captured_state_mask);
runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state));
runtime_call(_masm, CAST_FROM_FN_PTR(address, DowncallLinker::capture_state_post));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset);
}
__ block_comment("} save thread local");
@ -319,14 +347,14 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset);
}
__ mov(c_rarg0, r15_thread);
runtime_call(_masm, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset);
}
__ jmp(L_after_safepoint_poll);
@ -338,13 +366,13 @@ void DowncallLinker::StubGenerator::generate() {
__ bind(L_reguard);
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
out_reg_spiller.generate_spill(_masm, out_spill_rsp_offset);
}
runtime_call(_masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
out_reg_spiller.generate_fill(_masm, out_spill_rsp_offset);
}
__ jmp(L_after_reguard);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -231,8 +231,8 @@ inline bool frame::equal(frame other) const {
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
// frame. Should not be called for heap frames.
inline intptr_t* frame::id(void) const { return real_fp(); }
// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
@ -397,6 +397,9 @@ inline frame frame::sender(RegisterMap* map) const {
StackWatermarkSet::on_iteration(map->thread(), result);
}
// Calling frame::id() is currently not supported for heap frames.
assert(result._on_heap || this->_on_heap || result.is_older(this->id()), "Must be");
return result;
}

View File

@ -117,9 +117,6 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
product(bool, UseIncDec, true, DIAGNOSTIC, \
"Use INC, DEC instructions on x86") \
\
product(bool, UseNewLongLShift, false, \
"Use optimized bitwise shift left") \
\
product(bool, UseAddressNop, false, \
"Use '0F 1F [addr]' NOP instructions on x86 cpus") \
\
@ -168,16 +165,27 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
"Perform Ecore Optimization") \
\
/* Minimum array size in bytes to use AVX512 intrinsics */ \
/* for copy, inflate and fill which don't bail out early based on any */ \
/* for inflate and fill which don't bail out early based on any */ \
/* condition. When this value is set to zero compare operations like */ \
/* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
product(int, AVX3Threshold, 4096, DIAGNOSTIC, \
"Minimum array size in bytes to use AVX512 intrinsics" \
"for copy, inflate and fill. When this value is set as zero" \
"for inflate and fill. When this value is set as zero" \
"compare operations can also use AVX512 intrinsics.") \
range(0, max_jint) \
constraint(AVX3ThresholdConstraintFunc,AfterErgo) \
\
/* Minimum array size in bytes to use AVX512 intrinsics */ \
/* for copy and fill which don't bail out early based on any */ \
/* condition. When this value is set to zero clear operations that */ \
/* work on memory blocks can also use AVX512 intrinsics. */ \
product(int, CopyAVX3Threshold, 4096, DIAGNOSTIC, \
"Minimum array size in bytes to use AVX512 intrinsics" \
"for copy and fill. When this value is set as zero" \
"clear operations can also use AVX512 intrinsics.") \
range(0, max_jint) \
constraint(CopyAVX3ThresholdConstraintFunc,AfterErgo) \
\
product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC, \
"Turn off JVM mitigations related to Intel micro code " \
"mitigations for the Intel JCC erratum") \

View File

@ -5820,7 +5820,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X
// cnt - number of qwords (8-byte words).
// base - start address, qword aligned.
Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
bool use64byteVector = (MaxVectorSize == 64) && (CopyAVX3Threshold == 0);
if (use64byteVector) {
vpxor(xtmp, xtmp, xtmp, AVX_512bit);
} else if (MaxVectorSize >= 32) {
@ -5884,7 +5884,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X
// Clearing constant sized memory using YMM/ZMM registers.
void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
int vector64_count = (cnt & (~0x7)) >> 3;
cnt = cnt & 0x7;
@ -6109,8 +6109,8 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
// Fill 64-byte chunks
Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
// If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
cmpptr(count, VM_Version::avx3_threshold());
// If number of bytes to fill < CopyAVX3Threshold, perform fill using AVX2
cmpptr(count, CopyAVX3Threshold);
jccb(Assembler::below, L_check_fill_64_bytes_avx2);
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
@ -9483,7 +9483,6 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va
Label L_fill_zmm_sequence;
int shift = -1;
int avx3threshold = VM_Version::avx3_threshold();
switch(type) {
case T_BYTE: shift = 0;
break;
@ -9499,10 +9498,10 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va
fatal("Unhandled type: %s\n", type2name(type));
}
if ((avx3threshold != 0) || (MaxVectorSize == 32)) {
if ((CopyAVX3Threshold != 0) || (MaxVectorSize == 32)) {
if (MaxVectorSize == 64) {
cmpq(count, avx3threshold >> shift);
cmpq(count, CopyAVX3Threshold >> shift);
jcc(Assembler::greater, L_fill_zmm_sequence);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,12 +166,12 @@ class StubGenerator: public StubCodeGenerator {
// - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs)
// for both special cases (various small block sizes) and aligned copy loop. This is the
// default configuration.
// - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it.
// - If user forces MaxVectorSize=32 then above 4096 bytes its seen that REP MOVs shows a
// better performance for disjoint copies. For conjoint/backward copy vector based
// copy performs better.
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
// - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over
// 64 byte vector registers (ZMMs).
address generate_disjoint_copy_avx3_masked(StubId stub_id, address* entry);
@ -330,6 +330,19 @@ class StubGenerator: public StubCodeGenerator {
void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
// Shared implementation for ECB/AES Encrypt and Decrypt, which does 4 blocks
// in a loop at a time to hide instruction latency. Set is_encrypt=true for
// encryption, false for decryption.
address generate_electronicCodeBook_AESCrypt_Parallel(bool is_encrypt);
// A version of ECB/AES Encrypt which does 4 blocks in a loop at a time
// to hide instruction latency
address generate_electronicCodeBook_encryptAESCrypt_Parallel();
// A version of ECB/AES Decrypt which does 4 blocks in a loop at a time
// to hide instruction latency
address generate_electronicCodeBook_decryptAESCrypt_Parallel();
// Vector AES Galois Counter Mode implementation
address generate_galoisCounterMode_AESCrypt();
void aesgcm_encrypt(Register in, Register len, Register ct, Register out, Register key,

View File

@ -144,7 +144,7 @@ address StubGenerator::generate_updateBytesAdler32() {
__ align32();
if (VM_Version::supports_avx512vl()) {
// AVX2 performs better for smaller inputs because of leaner post loop reduction sequence..
__ cmpl(s, MAX2(128, VM_Version::avx3_threshold()));
__ cmpl(s, MAX2(128, CopyAVX3Threshold));
__ jcc(Assembler::belowEqual, SLOOP1A_AVX2);
__ lea(end, Address(s, data, Address::times_1, - (2*CHUNKSIZE -1)));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Intel Corporation. All rights reserved.
* Copyright (c) 2019, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -218,6 +218,8 @@ void StubGenerator::generate_aes_stubs() {
StubRoutines::_galoisCounterMode_AESCrypt = generate_galoisCounterMode_AESCrypt();
} else {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt_Parallel();
StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt_Parallel();
if (VM_Version::supports_avx2()) {
StubRoutines::_galoisCounterMode_AESCrypt = generate_avx2_galoisCounterMode_AESCrypt();
}
@ -1399,6 +1401,200 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
return start;
}
// This is a version of ECB/AES Encrypt/Decrypt which does 4 blocks in a loop
// at a time to hide instruction latency.
//
// For encryption (is_encrypt=true):
// pxor key[0], aesenc key[1..rounds-1], aesenclast key[rounds]
// For decryption (is_encrypt=false):
// pxor key[1], aesdec key[2..rounds], aesdeclast key[0]
//
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - session key (Ke/Kd) in little endian int array
// c_rarg3 - input length (must be multiple of blocksize 16)
//
// Output:
// rax - input length
//
address StubGenerator::generate_electronicCodeBook_AESCrypt_Parallel(bool is_encrypt) {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubId stub_id = is_encrypt ? StubId::stubgen_electronicCodeBook_encryptAESCrypt_id
: StubId::stubgen_electronicCodeBook_decryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
const Register from = c_rarg0; // source array address
const Register to = c_rarg1; // destination array address
const Register key = c_rarg2; // key array address
const Register len_reg = c_rarg3; // src len (must be multiple of blocksize 16)
const Register pos = rax;
const Register keylen = r11;
const XMMRegister xmm_result0 = xmm0;
const XMMRegister xmm_result1 = xmm1;
const XMMRegister xmm_result2 = xmm2;
const XMMRegister xmm_result3 = xmm3;
const XMMRegister xmm_key_shuf_mask = xmm4;
const XMMRegister xmm_key_tmp = xmm5;
// keys 0-9 pre-loaded into xmm6-xmm15
const int XMM_REG_NUM_KEY_FIRST = 6;
const int XMM_REG_NUM_KEY_LAST = 15;
const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
// for key_128, key_192, key_256
const int ROUNDS[3] = {10, 12, 14};
Label L_exit;
Label L_loop4[3], L_single[3], L_done[3];
#ifdef DoFour
#undef DoFour
#endif
#ifdef DoOne
#undef DoOne
#endif
#define DoFour(opc, reg) \
__ opc(xmm_result0, reg); \
__ opc(xmm_result1, reg); \
__ opc(xmm_result2, reg); \
__ opc(xmm_result3, reg);
#define DoOne(opc, reg) \
__ opc(xmm_result0, reg);
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ push(len_reg); // save original length for return value
__ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ movdqu(xmm_key_shuf_mask, ExternalAddress(key_shuffle_mask_addr()), r10 /*rscratch*/);
// load up xmm regs 6 thru 15 with keys 0x00 - 0x90
for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++, offset += 0x10) {
load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
}
__ xorptr(pos, pos);
// key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ cmpl(keylen, 52);
__ jcc(Assembler::equal, L_loop4[1]);
__ cmpl(keylen, 60);
__ jcc(Assembler::equal, L_loop4[2]);
// k == 0: generate code for key_128
// k == 1: generate code for key_192
// k == 2: generate code for key_256
for (int k = 0; k < 3; ++k) {
__ align(OptoLoopAlignment);
__ BIND(L_loop4[k]);
__ cmpptr(len_reg, 4 * AESBlockSize);
__ jcc(Assembler::less, L_single[k]);
__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
__ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
__ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
__ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
if (is_encrypt) {
DoFour(pxor, xmm_key_first);
for (int rnum = 1; rnum < 10; rnum++) {
DoFour(aesenc, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
for (int i = 10; i < ROUNDS[k]; i++) {
load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask);
DoFour(aesenc, xmm_key_tmp);
}
load_key(xmm_key_tmp, key, ROUNDS[k] * 0x10, xmm_key_shuf_mask);
DoFour(aesenclast, xmm_key_tmp);
} else {
DoFour(pxor, as_XMMRegister(1 + XMM_REG_NUM_KEY_FIRST));
for (int rnum = 2; rnum < 10; rnum++) {
DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
for (int i = 10; i <= ROUNDS[k]; i++) {
load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask);
DoFour(aesdec, xmm_key_tmp);
}
DoFour(aesdeclast, xmm_key_first);
}
__ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
__ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
__ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
__ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
__ addptr(pos, 4 * AESBlockSize);
__ subptr(len_reg, 4 * AESBlockSize);
__ jmp(L_loop4[k]);
__ align(OptoLoopAlignment);
__ BIND(L_single[k]);
__ cmpptr(len_reg, AESBlockSize);
__ jcc(Assembler::less, L_done[k]);
__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0));
if (is_encrypt) {
DoOne(pxor, xmm_key_first);
for (int rnum = 1; rnum < 10; rnum++) {
DoOne(aesenc, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
for (int i = 10; i < ROUNDS[k]; i++) {
load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask);
DoOne(aesenc, xmm_key_tmp);
}
load_key(xmm_key_tmp, key, ROUNDS[k] * 0x10, xmm_key_shuf_mask);
DoOne(aesenclast, xmm_key_tmp);
} else {
DoOne(pxor, as_XMMRegister(1 + XMM_REG_NUM_KEY_FIRST));
for (int rnum = 2; rnum < 10; rnum++) {
DoOne(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
for (int i = 10; i <= ROUNDS[k]; i++) {
load_key(xmm_key_tmp, key, i * 0x10, xmm_key_shuf_mask);
DoOne(aesdec, xmm_key_tmp);
}
DoOne(aesdeclast, xmm_key_first);
}
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result0);
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jmp(L_single[k]);
__ BIND(L_done[k]);
if (k < 2) __ jmp(L_exit);
} //for key_128/192/256
__ BIND(L_exit);
// Clear all XMM registers holding sensitive key material before returning
__ pxor(xmm_key_tmp, xmm_key_tmp);
for (int rnum = XMM_REG_NUM_KEY_FIRST; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
__ pxor(as_XMMRegister(rnum), as_XMMRegister(rnum));
}
__ pop(rax);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
#undef DoFour
#undef DoOne
}
address StubGenerator::generate_electronicCodeBook_encryptAESCrypt_Parallel() {
return generate_electronicCodeBook_AESCrypt_Parallel(true);
}
address StubGenerator::generate_electronicCodeBook_decryptAESCrypt_Parallel() {
return generate_electronicCodeBook_AESCrypt_Parallel(false);
}
// This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
// to hide instruction latency
//
@ -1493,7 +1689,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
__ opc(xmm_result0, src_reg); \
__ opc(xmm_result1, src_reg); \
__ opc(xmm_result2, src_reg); \
__ opc(xmm_result3, src_reg); \
__ opc(xmm_result3, src_reg);
for (int k = 0; k < 3; ++k) {
__ BIND(L_multiBlock_loopTopHead[k]);

View File

@ -511,12 +511,12 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest,
// - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs)
// for both special cases (various small block sizes) and aligned copy loop. This is the
// default configuration.
// - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it.
// - If user forces MaxVectorSize=32 then above 4096 bytes its seen that REP MOVs shows a
// better performance for disjoint copies. For conjoint/backward copy vector based
// copy performs better.
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
// - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over
// 64 byte vector registers (ZMMs).
// Inputs:
@ -575,8 +575,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
StubCodeMark mark(this, stub_id);
address start = __ pc();
int avx3threshold = VM_Version::avx3_threshold();
bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
const int large_threshold = 2621440; // 2.5 MB
Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry;
Label L_repmovs, L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64;
@ -647,7 +646,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
__ cmpq(temp2, large_threshold);
__ jcc(Assembler::greaterEqual, L_copy_large);
}
if (avx3threshold != 0) {
if (CopyAVX3Threshold != 0) {
__ cmpq(count, threshold[shift]);
if (MaxVectorSize == 64) {
// Copy using 64 byte vectors.
@ -659,7 +658,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
}
}
if ((MaxVectorSize < 64) || (avx3threshold != 0)) {
if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) {
// Partial copy to make dst address 32 byte aligned.
__ movq(temp2, to);
__ andq(temp2, 31);
@ -913,8 +912,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres
StubCodeMark mark(this, stub_id);
address start = __ pc();
int avx3threshold = VM_Version::avx3_threshold();
bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
Label L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64;
Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry;
@ -979,12 +977,12 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres
// PRE-MAIN-POST loop for aligned copy.
__ BIND(L_entry);
if ((MaxVectorSize > 32) && (avx3threshold != 0)) {
if ((MaxVectorSize > 32) && (CopyAVX3Threshold != 0)) {
__ cmpq(temp1, threshold[shift]);
__ jcc(Assembler::greaterEqual, L_pre_main_post_64);
}
if ((MaxVectorSize < 64) || (avx3threshold != 0)) {
if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) {
// Partial copy to make dst address 32 byte aligned.
__ leaq(temp2, Address(to, temp1, (Address::ScaleFactor)(shift), 0));
__ andq(temp2, 31);
@ -1199,7 +1197,7 @@ void StubGenerator::arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegi
bool use64byteVector, Label& L_entry, Label& L_exit) {
Label L_entry_64, L_entry_96, L_entry_128;
Label L_entry_160, L_entry_192;
bool avx3 = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
bool avx3 = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
int size_mat[][6] = {
/* T_BYTE */ {32 , 64, 96 , 128 , 160 , 192 },

View File

@ -1508,9 +1508,6 @@ void VM_Version::get_processor_features() {
MaxLoopPad = 11;
}
#endif // COMPILER2
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
}
if (supports_sse4_2()) { // new ZX cpus
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
@ -1528,10 +1525,6 @@ void VM_Version::get_processor_features() {
// Use it on new AMD cpus starting from Opteron.
UseAddressNop = true;
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
// Use it on new AMD cpus starting from Opteron.
UseNewLongLShift = true;
}
if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
if (supports_sse4a()) {
UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
@ -1571,10 +1564,6 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
// On family 15h processors use XMM and UnalignedLoadStores for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
@ -1591,9 +1580,6 @@ void VM_Version::get_processor_features() {
if (cpu_family() >= 0x17) {
// On family >=17h processors use XMM and UnalignedLoadStores
// for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
@ -1640,9 +1626,6 @@ void VM_Version::get_processor_features() {
}
#endif // COMPILER2
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
}
if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
@ -1967,6 +1950,18 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(UseCopySignIntrinsic)) {
FLAG_SET_DEFAULT(UseCopySignIntrinsic, true);
}
// CopyAVX3Threshold is the threshold at which 64-byte instructions are used
// for implementing the array copy and clear operations.
// The Intel platforms that supports the serialize instruction
// have improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
if (FLAG_IS_DEFAULT(CopyAVX3Threshold)) {
if (is_intel() && is_intel_server_family() && supports_serialize()) {
FLAG_SET_DEFAULT(CopyAVX3Threshold, 0);
} else {
FLAG_SET_DEFAULT(CopyAVX3Threshold, AVX3Threshold);
}
}
}
void VM_Version::print_platform_virtualization_info(outputStream* st) {
@ -2122,17 +2117,6 @@ bool VM_Version::is_intel_darkmont() {
return is_intel() && is_intel_server_family() && (_model == 0xCC || _model == 0xDD);
}
// avx3_threshold() sets the threshold at which 64-byte instructions are used
// for implementing the array copy and clear operations.
// The Intel platforms that supports the serialize instruction
// has improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
int VM_Version::avx3_threshold() {
return (is_intel_server_family() &&
supports_serialize() &&
FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
}
void VM_Version::clear_apx_test_state() {
clear_apx_test_state_stub();
}

View File

@ -828,7 +828,7 @@ public:
static uint32_t cpu_stepping() { return _cpuid_info.cpu_stepping(); }
static int cpu_family() { return _cpu;}
static bool is_P6() { return cpu_family() >= 6; }
static bool is_intel_server_family() { return cpu_family() == 6 || cpu_family() == 19; }
static bool is_intel_server_family() { return cpu_family() == 6 || cpu_family() == 18 || cpu_family() == 19; }
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_hygon() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
static bool is_amd_family() { return is_amd() || is_hygon(); }
@ -958,8 +958,6 @@ public:
static bool is_intel_darkmont();
static int avx3_threshold();
static bool is_intel_tsc_synched_at_init();
static void insert_features_names(VM_Version::VM_Features features, stringStream& ss);

View File

@ -2763,13 +2763,6 @@ uint Matcher::float_pressure_limit()
return (FLOATPRESSURE == -1) ? default_float_pressure_threshold : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// In 64 bit mode a code which use multiply when
// devisor is constant is faster than hardware
// DIV instruction (it uses MulHiL).
return false;
}
// Register for DIVI projection of divmodI
const RegMask& Matcher::divI_proj_mask() {
return INT_RAX_REG_mask();
@ -8852,6 +8845,21 @@ instruct membar_release_lock()
ins_pipe(empty);
%}
// Matches a MemBarStoreLoad node. Encodes via MacroAssembler::membar with a
// StoreLoad constraint (the format template shows the emitted
// "lock addl [rsp + #0], 0" form). Kills the flags register.
instruct membar_storeload(rFlagsReg cr) %{
  match(MemBarStoreLoad);
  effect(KILL cr);
  ins_cost(400);
  format %{
    $$template
    $$emit$$"lock addl [rsp + #0], 0\t! membar_storeload"
  %}
  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}
  ins_pipe(pipe_slow);
%}
instruct membar_volatile(rFlagsReg cr) %{
match(MemBarVolatile);
effect(KILL cr);
@ -8879,6 +8887,21 @@ instruct unnecessary_membar_volatile()
ins_pipe(empty);
%}
// Matches a MemBarFull node. Same encoding as membar_storeload:
// MacroAssembler::membar with a StoreLoad constraint (format template shows
// the emitted "lock addl [rsp + #0], 0" form). Kills the flags register.
instruct membar_full(rFlagsReg cr) %{
  match(MemBarFull);
  effect(KILL cr);
  ins_cost(400);
  format %{
    $$template
    $$emit$$"lock addl [rsp + #0], 0\t! membar_full"
  %}
  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}
  ins_pipe(pipe_slow);
%}
instruct membar_storestore() %{
match(MemBarStoreStore);
match(StoreStoreFence);

View File

@ -2667,3 +2667,7 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
void os::jfr_report_memory_info() {}
#endif // INCLUDE_JFR
// Platform hook used by os/error reporting. Intentionally a no-op here:
// counting the process's open file descriptors is not implemented on AIX.
void os::print_open_file_descriptors(outputStream* st) {
  // File descriptor counting not implemented on AIX
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,25 +37,9 @@
// (see http://linux.die.net/man/3/dladdr)
// dladdr(3) is not POSIX but a GNU extension, and is not available on AIX.
//
// Differences between AIX dladdr and Linux dladdr:
//
// 1) Dl_info.dli_fbase: can never work, is disabled.
// A loaded image on AIX is divided in multiple segments, at least two
// (text and data) but potentially also far more. This is because the loader may
// load each member into an own segment, as for instance happens with the libC.a
// 2) Dl_info.dli_sname: This only works for code symbols (functions); for data, a
// zero-length string is returned ("").
// 3) Dl_info.dli_saddr: For code, this will return the entry point of the function,
// not the function descriptor.
typedef struct {
const char *dli_fname; // file path of loaded library
// void *dli_fbase;
const char *dli_sname; // symbol name; "" if not known
void *dli_saddr; // address of *entry* of function; not function descriptor;
} Dl_info;
#include "dl_info.h"
// Note: we export this to use it inside J2se too
#ifdef __cplusplus
extern "C"
#endif

View File

@ -76,6 +76,7 @@
# include <fcntl.h>
# include <fenv.h>
# include <inttypes.h>
# include <mach/mach.h>
# include <poll.h>
# include <pthread.h>
# include <pwd.h>
@ -102,6 +103,7 @@
#endif
#ifdef __APPLE__
#include <libproc.h>
#include <mach/task_info.h>
#include <mach-o/dyld.h>
#endif
@ -2596,3 +2598,45 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
return res;
} // end: os::pd_dll_unload()
// Platform hook used by os/error reporting. On macOS, delegates to the
// Bsd-specific helper with a stack scratch buffer sized for 1024
// proc_fdinfo entries (the helper prints a lower bound if the process has
// more open descriptors than fit). On other BSDs the count is unknown.
void os::print_open_file_descriptors(outputStream* st) {
#ifdef __APPLE__
  char buf[1024 * sizeof(struct proc_fdinfo)];
  os::Bsd::print_open_file_descriptors(st, buf, sizeof(buf));
#else
  st->print_cr("Open File Descriptors: unknown");
#endif
}
// Print the number of open file descriptors of the current process on
// macOS, using proc_pidinfo(PROC_PIDLISTFDS) into the caller-supplied
// scratch buffer. If the buffer cannot hold all descriptors, only a lower
// bound ("> N") is printed; on any API failure, "unknown" is printed.
// buf/buflen: scratch space, must hold at least one proc_fdinfo.
void os::Bsd::print_open_file_descriptors(outputStream* st, char* buf, size_t buflen) {
#ifdef __APPLE__
  pid_t my_pid;

  // ensure the scratch buffer is big enough for at least one FD info struct
  precond(buflen >= sizeof(struct proc_fdinfo));

  // Obtain our pid from the Mach task port rather than getpid().
  kern_return_t kres = pid_for_task(mach_task_self(), &my_pid);
  if (kres != KERN_SUCCESS) {
    st->print_cr("Open File Descriptors: unknown");
    return;
  }

  size_t max_fds = buflen / sizeof(struct proc_fdinfo);
  struct proc_fdinfo* fds = reinterpret_cast<struct proc_fdinfo*>(buf);

  // fill our buffer with FD info, up to the available buffer size
  int res = proc_pidinfo(my_pid, PROC_PIDLISTFDS, 0, fds, max_fds * sizeof(struct proc_fdinfo));
  if (res <= 0) {
    st->print_cr("Open File Descriptors: unknown");
    return;
  }

  // proc_pidinfo returns the number of bytes filled in.
  // print lower threshold if count exceeds buffer size
  int nfiles = res / sizeof(struct proc_fdinfo);
  if ((size_t)nfiles >= max_fds) {
    st->print_cr("Open File Descriptors: > %zu", max_fds);
    return;
  }
  st->print_cr("Open File Descriptors: %d", nfiles);
#else
  st->print_cr("Open File Descriptors: unknown");
#endif
}

View File

@ -123,6 +123,8 @@ class os::Bsd {
static int get_node_by_cpu(int cpu_id);
static void print_uptime_info(outputStream* st);
static void print_open_file_descriptors(outputStream* st, char* buf, size_t buflen);
static void print_open_file_descriptors(outputStream* st);
};
#endif // OS_BSD_OS_BSD_HPP

View File

@ -28,7 +28,6 @@
#include "cgroupV2Subsystem_linux.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "os_linux.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
@ -605,6 +604,11 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
}
}
// Adjust the memory and cpu controller paths to the point in the cgroup
// hierarchy holding the closest limit, capped by the given upper bounds
// (host physical memory and host cpu count, per the caller in
// OSContainer::init).
void CgroupSubsystem::adjust_controllers(physical_memory_size_type upper_mem_bound, int upper_cpu_bound) {
  CgroupUtil::adjust_controller(memory_controller()->controller(), upper_mem_bound);
  CgroupUtil::adjust_controller(cpu_controller()->controller(), upper_cpu_bound);
}
/* active_processor_count
*
* Calculate an appropriate number of active processors for the
@ -631,7 +635,7 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
bool CgroupSubsystem::active_processor_count(double& value) {
bool CgroupSubsystem::active_processor_count(int (*cpu_bound_func)(), double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
@ -643,7 +647,7 @@ bool CgroupSubsystem::active_processor_count(double& value) {
return true;
}
int cpu_count = os::Linux::active_processor_count();
int cpu_count = cpu_bound_func();
double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;

View File

@ -278,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
bool active_processor_count(double& value);
bool active_processor_count(int (*cpu_bound_func)(), double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@ -291,6 +291,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
void adjust_controllers(physical_memory_size_type upper_mem_bound, int upper_cpu_bound);
bool cpu_quota(int& value);
bool cpu_period(int& value);
bool cpu_shares(int& value);

View File

@ -24,7 +24,6 @@
*/
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
@ -82,7 +81,7 @@ double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
return lowest;
}
void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
void CgroupUtil::adjust_controller(CgroupMemoryController* mem, physical_memory_size_type upper_bound) {
assert(mem->cgroup_path() != nullptr, "invariant");
if (strstr(mem->cgroup_path(), "../") != nullptr) {
log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved "
@ -100,17 +99,16 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
char* cg_path = os::strdup(orig);
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
physical_memory_size_type phys_mem = os::Linux::physical_memory();
char* limit_cg_path = nullptr;
physical_memory_size_type limit = value_unlimited;
physical_memory_size_type lowest_limit = phys_mem;
lowest_limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
physical_memory_size_type orig_limit = lowest_limit != phys_mem ? lowest_limit : phys_mem;
physical_memory_size_type lowest_limit = upper_bound;
lowest_limit = get_updated_mem_limit(mem, lowest_limit, upper_bound);
physical_memory_size_type orig_limit = lowest_limit != upper_bound ? lowest_limit : upper_bound;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
mem->set_subsystem_path(cg_path);
limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
limit = get_updated_mem_limit(mem, lowest_limit, upper_bound);
if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
@ -119,13 +117,13 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
}
// need to check limit at mount point
mem->set_subsystem_path("/");
limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
limit = get_updated_mem_limit(mem, lowest_limit, upper_bound);
if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup("/");
}
assert(lowest_limit <= phys_mem, "limit must not exceed host memory");
assert(lowest_limit <= upper_bound, "limit must not exceed upper bound");
if (lowest_limit != orig_limit) {
// we've found a lower limit anywhere in the hierarchy,
// set the path to the limit path
@ -147,7 +145,7 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
os::free(limit_cg_path);
}
void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
void CgroupUtil::adjust_controller(CgroupCpuController* cpu, int upper_bound) {
assert(cpu->cgroup_path() != nullptr, "invariant");
if (strstr(cpu->cgroup_path(), "../") != nullptr) {
log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved "
@ -165,17 +163,16 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
char* cg_path = os::strdup(orig);
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
int lowest_limit = upper_bound;
double cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound);
int orig_limit = lowest_limit != upper_bound ? lowest_limit : upper_bound;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
cpu->set_subsystem_path(cg_path);
cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound);
if (cpus != upper_bound && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup(cg_path);
@ -183,8 +180,8 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
}
// need to check limit at mount point
cpu->set_subsystem_path("/");
cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
cpus = get_updated_cpu_limit(cpu, lowest_limit, upper_bound);
if (cpus != upper_bound && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup(cg_path);

View File

@ -35,10 +35,10 @@ class CgroupUtil: AllStatic {
static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
static void adjust_controller(CgroupMemoryController* m, physical_memory_size_type upper_bound);
// Given a cpu controller, adjust its path to a point in the hierarchy
// that represents the closest cpu limit.
static void adjust_controller(CgroupCpuController* c);
static void adjust_controller(CgroupCpuController* c, int upper_bound);
private:
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,

View File

@ -326,8 +326,6 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_cpuset(cpuset),
_cpuacct(cpuacct),
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
}

View File

@ -154,8 +154,6 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
CgroupV2CpuacctController* cpuacct,
CgroupV2Controller unified) :
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_cpuacct = cpuacct;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -35,11 +35,16 @@
#include <dirent.h>
ExplicitHugePageSupport::ExplicitHugePageSupport() :
_initialized(false), _pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {}
_initialized{false}, _os_supported{}, _pre_allocated{}, _default_hugepage_size{SIZE_MAX}, _inconsistent{false} {}
os::PageSizes ExplicitHugePageSupport::pagesizes() const {
os::PageSizes ExplicitHugePageSupport::os_supported() const {
assert(_initialized, "Not initialized");
return _pagesizes;
return _os_supported;
}
os::PageSizes ExplicitHugePageSupport::pre_allocated() const {
assert(_initialized, "Not initialized");
return _pre_allocated;
}
size_t ExplicitHugePageSupport::default_hugepage_size() const {
@ -129,10 +134,24 @@ static os::PageSizes scan_hugepages() {
return pagesizes;
}
// From the given set of OS-supported hugepage sizes, return the subset for
// which pages are actually pre-allocated, i.e. whose
// <sys_hugepages>/hugepages-<size>kB/nr_hugepages file reads as a number > 0.
// Sizes whose nr_hugepages file cannot be read are treated as not
// pre-allocated.
static os::PageSizes filter_pre_allocated_hugepages(os::PageSizes pagesizes) {
  os::PageSizes pre_allocated{};
  char filename[PATH_MAX];
  for (size_t ps = pagesizes.smallest(); ps != 0; ps = pagesizes.next_larger(ps)) {
    os::snprintf_checked(filename, sizeof(filename), "%s/hugepages-%zukB/nr_hugepages", sys_hugepages, ps / K);
    size_t pages;
    bool read_success = read_number_file(filename, &pages);
    if (read_success && pages > 0) {
      pre_allocated.add(ps);
    }
  }
  return pre_allocated;
}
void ExplicitHugePageSupport::print_on(outputStream* os) {
if (_initialized) {
os->print_cr("Explicit hugepage support:");
for (size_t s = _pagesizes.smallest(); s != 0; s = _pagesizes.next_larger(s)) {
for (size_t s = _os_supported.smallest(); s != 0; s = _os_supported.next_larger(s)) {
os->print_cr(" hugepage size: " EXACTFMT, EXACTFMTARGS(s));
}
os->print_cr(" default hugepage size: " EXACTFMT, EXACTFMTARGS(_default_hugepage_size));
@ -147,14 +166,15 @@ void ExplicitHugePageSupport::print_on(outputStream* os) {
void ExplicitHugePageSupport::scan_os() {
_default_hugepage_size = scan_default_hugepagesize();
if (_default_hugepage_size > 0) {
_pagesizes = scan_hugepages();
_os_supported = scan_hugepages();
_pre_allocated = filter_pre_allocated_hugepages(_os_supported);
// See https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt: /proc/meminfo should match
// /sys/kernel/mm/hugepages/hugepages-xxxx. However, we may run on a broken kernel (e.g. on WSL)
// that only exposes /proc/meminfo but not /sys/kernel/mm/hugepages. In that case, we are not
// sure about the state of hugepage support by the kernel, so we won't use explicit hugepages.
if (!_pagesizes.contains(_default_hugepage_size)) {
if (!_os_supported.contains(_default_hugepage_size)) {
log_info(pagesize)("Unexpected configuration: default pagesize (%zu) "
"has no associated directory in /sys/kernel/mm/hugepages..", _default_hugepage_size);
"has no associated directory in /sys/kernel/mm/hugepages.", _default_hugepage_size);
_inconsistent = true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -45,7 +45,10 @@ class ExplicitHugePageSupport {
// All supported hugepage sizes (sizes for which entries exist
// in /sys/kernel/mm/hugepages/hugepage-xxx)
os::PageSizes _pagesizes;
os::PageSizes _os_supported;
// Above pages filtered for where the contents of file nr_hugepages was larger than zero
os::PageSizes _pre_allocated;
// Contains the default hugepage. The "default hugepage size" is the one that
// - is marked in /proc/meminfo as "Hugepagesize"
@ -60,7 +63,8 @@ public:
void scan_os();
os::PageSizes pagesizes() const;
os::PageSizes os_supported() const;
os::PageSizes pre_allocated() const;
size_t default_hugepage_size() const;
void print_on(outputStream* os);

View File

@ -59,6 +59,11 @@ void OSContainer::init() {
if (cgroup_subsystem == nullptr) {
return; // Required subsystem files not found or other error
}
// Adjust controller paths once subsystem is initialized
physical_memory_size_type phys_mem = os::Linux::physical_memory();
int host_cpus = os::Linux::active_processor_count();
cgroup_subsystem->adjust_controllers(phys_mem, host_cpus);
/*
* In order to avoid a false positive on is_containerized() on
* Linux systems outside a container *and* to ensure compatibility
@ -252,7 +257,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
return cgroup_subsystem->active_processor_count(&os::Linux::active_processor_count, value);
}
bool OSContainer::cpu_quota(int& value) {

View File

@ -83,6 +83,7 @@
#endif
# include <ctype.h>
# include <dirent.h>
# include <dlfcn.h>
# include <endian.h>
# include <errno.h>
@ -113,6 +114,7 @@
# include <sys/types.h>
# include <sys/utsname.h>
# include <syscall.h>
# include <time.h>
# include <unistd.h>
#ifdef __GLIBC__
# include <malloc.h>
@ -2161,6 +2163,8 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_rlimit_info(st);
os::print_open_file_descriptors(st);
os::Posix::print_load_average(st);
st->cr();
@ -3814,8 +3818,8 @@ static int hugetlbfs_page_size_flag(size_t page_size) {
}
static bool hugetlbfs_sanity_check(size_t page_size) {
const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes();
assert(page_sizes.contains(page_size), "Invalid page sizes passed");
const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported();
assert(os_supported.contains(page_size), "Invalid page sizes passed (%zu)", page_size);
// Include the page size flag to ensure we sanity check the correct page size.
int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
@ -3829,16 +3833,16 @@ static bool hugetlbfs_sanity_check(size_t page_size) {
log_info(pagesize)("Large page size (" EXACTFMT ") failed sanity check, "
"checking if smaller large page sizes are usable",
EXACTFMTARGS(page_size));
for (size_t page_size_ = page_sizes.next_smaller(page_size);
page_size_ > os::vm_page_size();
page_size_ = page_sizes.next_smaller(page_size_)) {
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
for (size_t size = os_supported.next_smaller(page_size);
size > os::vm_page_size();
size = os_supported.next_smaller(size)) {
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(size);
p = mmap(nullptr, size, PROT_READ|PROT_WRITE, flags, -1, 0);
if (p != MAP_FAILED) {
// Mapping succeeded, sanity check passed.
munmap(p, page_size_);
munmap(p, size);
log_info(pagesize)("Large page size (" EXACTFMT ") passed sanity check",
EXACTFMTARGS(page_size_));
EXACTFMTARGS(size));
return true;
}
}
@ -4020,7 +4024,7 @@ void os::Linux::large_page_init() {
// - os::large_page_size() is the default explicit hugepage size (/proc/meminfo "Hugepagesize")
// - os::pagesizes() contains all hugepage sizes the kernel supports, regardless whether there
// are pages configured in the pool or not (from /sys/kernel/hugepages/hugepage-xxxx ...)
os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().pagesizes();
os::PageSizes all_large_pages = HugePages::explicit_hugepage_info().os_supported();
const size_t default_large_page_size = HugePages::default_explicit_hugepage_size();
// 3) Consistency check and post-processing
@ -4062,10 +4066,10 @@ void os::Linux::large_page_init() {
_large_page_size = large_page_size;
// Populate _page_sizes with large page sizes less than or equal to
// _large_page_size.
for (size_t page_size = _large_page_size; page_size != 0;
page_size = all_large_pages.next_smaller(page_size)) {
// Populate _page_sizes with _large_page_size (default large page size) even if not pre-allocated.
// Then, populate _page_sizes with all smaller large page sizes that have been pre-allocated.
os::PageSizes pre_allocated = HugePages::explicit_hugepage_info().pre_allocated();
for (size_t page_size = _large_page_size; page_size != 0; page_size = pre_allocated.next_smaller(page_size)) {
_page_sizes.add(page_size);
}
}
@ -4129,12 +4133,12 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes,
size_t page_size,
char* req_addr,
bool exec) {
const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes();
const os::PageSizes os_supported = HugePages::explicit_hugepage_info().os_supported();
assert(UseLargePages, "only for Huge TLBFS large pages");
assert(is_aligned(req_addr, alignment), "Must be");
assert(is_aligned(req_addr, page_size), "Must be");
assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
assert(page_sizes.contains(page_size), "Must be a valid page size");
assert(os_supported.contains(page_size), "Must be a valid page size");
assert(page_size > os::vm_page_size(), "Must be a large page size");
assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");
@ -4549,6 +4553,7 @@ void os::Linux::numa_init() {
FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
}
#if INCLUDE_PARALLELGC
if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
// With static large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified both
@ -4560,6 +4565,7 @@ void os::Linux::numa_init() {
UseAdaptiveNUMAChunkSizing = false;
}
}
#endif
}
void os::Linux::disable_numa(const char* reason, bool warning) {
@ -5427,3 +5433,31 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
return res;
} // end: os::pd_dll_unload()
// Print the number of open file descriptors of the current process by
// counting the numeric entries in /proc/self/fd. The scan is capped at
// ~50ms so this remains safe from error-reporting paths; if the cap is
// hit, a lower bound ("> N") is printed instead of an exact count.
void os::print_open_file_descriptors(outputStream* st) {
  DIR* dirp = opendir("/proc/self/fd");
  if (dirp == nullptr) {
    // No (or inaccessible) proc filesystem. Report gracefully, like the
    // other platforms, instead of passing a null DIR* to readdir below
    // (the previous assert-only check compiled away in product builds).
    st->print_cr("Open File Descriptors: unknown");
    return;
  }
  int fds = 0;
  struct dirent* dentp;
  const jlong TIMEOUT_NS = 50000000L; // 50 ms in nanoseconds
  bool timed_out = false;

  // limit proc file read to 50ms
  jlong start = os::javaTimeNanos();
  while ((dentp = readdir(dirp)) != nullptr && !timed_out) {
    // Descriptor entries are purely numeric; this skips "." and "..".
    // Cast to unsigned char: isdigit on a negative char is undefined.
    if (isdigit((unsigned char)dentp->d_name[0])) fds++;
    // Only consult the clock every 100 descriptors to keep the scan cheap.
    if (fds % 100 == 0) {
      jlong now = os::javaTimeNanos();
      if ((now - start) > TIMEOUT_NS) {
        timed_out = true;
      }
    }
  }
  closedir(dirp);

  if (timed_out) {
    st->print_cr("Open File Descriptors: > %d", fds);
  } else {
    st->print_cr("Open File Descriptors: %d", fds);
  }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -366,6 +366,8 @@ provider hotspot_jni {
probe IsInstanceOf__return(uintptr_t);
probe IsSameObject__entry(void*, void*, void*);
probe IsSameObject__return(uintptr_t);
probe IsVirtualThread__entry(void*, void*);
probe IsVirtualThread__return(uintptr_t);
probe MonitorEnter__entry(void*, void*);
probe MonitorEnter__return(uint32_t);
probe MonitorExit__entry(void*, void*);

View File

@ -888,6 +888,14 @@ void* os::lookup_function(const char* name) {
return dlsym(RTLD_DEFAULT, name);
}
// Return the current position of the stream, or -1 on failure.
// NOTE(review): ::ftell returns 'long', which is 32-bit on some ILP32
// platforms — positions beyond 2GB would be truncated there; confirm
// whether any such platform is supported by this port.
int64_t os::ftell(FILE* file) {
  return ::ftell(file);
}
// Reposition the stream to 'offset' relative to 'whence' (SEEK_SET/SEEK_CUR/SEEK_END).
// Returns 0 on success, nonzero on failure (standard C fseek contract).
// NOTE(review): ::fseek takes a 'long' offset; a large int64_t offset may be
// narrowed on ILP32 platforms — confirm if relevant.
int os::fseek(FILE* file, int64_t offset, int whence) {
  return ::fseek(file, offset, whence);
}
// Seek on a raw file descriptor; thin wrapper over POSIX ::lseek.
// Returns the resulting offset from the start of the file, or -1 on error.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek(fd, offset, whence);
}

View File

@ -1084,18 +1084,9 @@ static char* mmap_create_shared(size_t size) {
// release a named shared memory region that was mmap-ed.
//
static void unmap_shared(char* addr, size_t bytes) {
int res;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
res = ::munmap(addr, bytes);
if (res == 0) {
MemTracker::record_virtual_memory_release(addr, bytes);
}
} else {
res = ::munmap(addr, bytes);
}
if (res != 0) {
log_info(os)("os::release_memory failed (" PTR_FORMAT ", %zu)", p2i(addr), bytes);
MemTracker::record_virtual_memory_release(addr, bytes);
if (::munmap(addr, bytes) != 0) {
fatal("os::release_memory failed (" PTR_FORMAT ", %zu)", p2i(addr), bytes);
}
}

View File

@ -5114,6 +5114,13 @@ jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
// Return the current position of the stream, or -1 on failure.
// Uses the 64-bit CRT variant so positions beyond 2GB are reported correctly.
int64_t os::ftell(FILE* file) {
  return ::_ftelli64(file);
}
// Reposition the stream to 'offset' relative to 'whence', using the
// 64-bit-capable CRT primitive. Returns 0 on success, nonzero on failure.
int os::fseek(FILE* file, int64_t offset, int whence) {
  const int status = ::_fseeki64(file, offset, whence);
  return status;
}
jlong os::lseek(int fd, jlong offset, int whence) {
return (jlong) ::_lseeki64(fd, offset, whence);
@ -6276,6 +6283,10 @@ const void* os::get_saved_assert_context(const void** sigInfo) {
return nullptr;
}
// Intentional no-op on Windows: nothing is printed to the stream.
void os::print_open_file_descriptors(outputStream* st) {
  // File descriptor counting not supported on Windows.
}
/*
* Windows/x64 does not use stack frames the way expected by Java:
* [1] in most cases, there is no frame pointer. All locals are addressed via RSP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1682,12 +1682,7 @@ void PerfMemory::detach(char* addr, size_t bytes) {
return;
}
if (MemTracker::enabled()) {
// it does not go through os api, the operation has to record from here
MemTracker::NmtVirtualMemoryLocker nvml;
remove_file_mapping(addr);
MemTracker::record_virtual_memory_release(addr, bytes);
} else {
remove_file_mapping(addr);
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_release(addr, bytes);
remove_file_mapping(addr);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,7 +27,6 @@
#define OS_CPU_BSD_ZERO_ATOMICACCESS_BSD_ZERO_HPP
#include "orderAccess_bsd_zero.hpp"
#include "runtime/os.hpp"
// Implementation of class AtomicAccess

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,8 +26,6 @@
#define OS_CPU_LINUX_ARM_ATOMICACCESS_LINUX_ARM_HPP
#include "memory/allStatic.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// Implementation of class AtomicAccess

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
// Included in orderAccess.hpp header file.
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// Implementation of class OrderAccess.

View File

@ -36,40 +36,42 @@
#include <sys/auxv.h>
#include <sys/prctl.h>
// Produce a uint64_t mask with only bit 'n' set; used below to supply
// fallback definitions for the HWCAP_ISA_* constants when absent from libc headers.
static constexpr uint64_t feature_bit(int n) { return nth_bit<uint64_t>(n); }
#ifndef HWCAP_ISA_I
#define HWCAP_ISA_I nth_bit('I' - 'A')
#define HWCAP_ISA_I feature_bit('I' - 'A')
#endif
#ifndef HWCAP_ISA_M
#define HWCAP_ISA_M nth_bit('M' - 'A')
#define HWCAP_ISA_M feature_bit('M' - 'A')
#endif
#ifndef HWCAP_ISA_A
#define HWCAP_ISA_A nth_bit('A' - 'A')
#define HWCAP_ISA_A feature_bit('A' - 'A')
#endif
#ifndef HWCAP_ISA_F
#define HWCAP_ISA_F nth_bit('F' - 'A')
#define HWCAP_ISA_F feature_bit('F' - 'A')
#endif
#ifndef HWCAP_ISA_D
#define HWCAP_ISA_D nth_bit('D' - 'A')
#define HWCAP_ISA_D feature_bit('D' - 'A')
#endif
#ifndef HWCAP_ISA_C
#define HWCAP_ISA_C nth_bit('C' - 'A')
#define HWCAP_ISA_C feature_bit('C' - 'A')
#endif
#ifndef HWCAP_ISA_Q
#define HWCAP_ISA_Q nth_bit('Q' - 'A')
#define HWCAP_ISA_Q feature_bit('Q' - 'A')
#endif
#ifndef HWCAP_ISA_H
#define HWCAP_ISA_H nth_bit('H' - 'A')
#define HWCAP_ISA_H feature_bit('H' - 'A')
#endif
#ifndef HWCAP_ISA_V
#define HWCAP_ISA_V nth_bit('V' - 'A')
#define HWCAP_ISA_V feature_bit('V' - 'A')
#endif
#define read_csr(csr) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,10 +26,6 @@
#ifndef OS_CPU_LINUX_S390_ATOMICACCESS_LINUX_S390_HPP
#define OS_CPU_LINUX_S390_ATOMICACCESS_LINUX_S390_HPP
#include "runtime/atomicAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// Note that the compare-and-swap instructions on System z perform
// a serialization function before the storage operand is fetched
// and again after the operation is completed.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,9 +27,7 @@
#define OS_CPU_WINDOWS_AARCH64_ATOMICACCESS_WINDOWS_AARCH64_HPP
#include <intrin.h>
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
#include <windows.h>
// As per atomicAccess.hpp all read-modify-write operations have to provide two-way
// barriers semantics. The memory_order parameter is ignored - we always provide

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define OS_CPU_WINDOWS_X86_ATOMICACCESS_WINDOWS_X86_HPP
#include <intrin.h>
#include "runtime/os.hpp"
#include <windows.h>
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler

View File

@ -4276,7 +4276,9 @@ bool MatchRule::is_ideal_membar() const {
!strcmp(_opType,"LoadFence" ) ||
!strcmp(_opType,"StoreFence") ||
!strcmp(_opType,"StoreStoreFence") ||
!strcmp(_opType,"MemBarStoreLoad") ||
!strcmp(_opType,"MemBarVolatile") ||
!strcmp(_opType,"MemBarFull") ||
!strcmp(_opType,"MemBarCPUOrder") ||
!strcmp(_opType,"MemBarStoreStore") ||
!strcmp(_opType,"OnSpinWait");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -234,7 +234,8 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
}
void AOTClassInitializer::call_runtime_setup(JavaThread* current, InstanceKlass* ik) {
assert(ik->has_aot_initialized_mirror(), "sanity");
precond(ik->has_aot_initialized_mirror());
precond(!AOTLinkedClassBulkLoader::is_initializing_classes_early());
if (ik->is_runtime_setup_required()) {
if (log_is_enabled(Info, aot, init)) {
ResourceMark rm;

View File

@ -81,6 +81,7 @@ bool AOTConstantPoolResolver::is_resolution_deterministic(ConstantPool* cp, int
bool AOTConstantPoolResolver::is_class_resolution_deterministic(InstanceKlass* cp_holder, Klass* resolved_class) {
assert(!is_in_archivebuilder_buffer(cp_holder), "sanity");
assert(!is_in_archivebuilder_buffer(resolved_class), "sanity");
assert_at_safepoint(); // try_add_candidate() is called below and requires to be at safepoint.
if (resolved_class->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(resolved_class);
@ -346,7 +347,15 @@ void AOTConstantPoolResolver::maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m
break;
case Bytecodes::_invokehandle:
InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK);
if (CDSConfig::is_dumping_method_handles()) {
ResolvedMethodEntry* method_entry = cp->resolved_method_entry_at(raw_index);
int cp_index = method_entry->constant_pool_index();
Symbol* sig = cp->uncached_signature_ref_at(cp_index);
Klass* k;
if (check_methodtype_signature(cp(), sig, &k, true)) {
InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK);
}
}
break;
default:
@ -400,7 +409,7 @@ void AOTConstantPoolResolver::preresolve_indy_cp_entries(JavaThread* current, In
// Check the MethodType signatures used by parameters to the indy BSMs. Make sure we don't
// use types that have been excluded, or else we might end up creating MethodTypes that cannot be stored
// in the AOT cache.
bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret) {
bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret, bool is_invokehandle) {
ResourceMark rm;
for (SignatureStream ss(sig); !ss.is_done(); ss.next()) {
if (ss.is_reference()) {
@ -413,11 +422,18 @@ bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbo
if (SystemDictionaryShared::should_be_excluded(k)) {
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name());
log_warning(aot, resolve)("Cannot aot-resolve %s because %s is excluded",
is_invokehandle ? "invokehandle" : "Lambda proxy",
k->external_name());
}
return false;
}
// cp->pool_holder() must be able to resolve k in production run
precond(CDSConfig::is_dumping_aot_linked_classes());
precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data()));
precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data()));
if (ss.at_return_type() && return_type_ret != nullptr) {
*return_type_ret = k;
}
@ -475,11 +491,44 @@ bool AOTConstantPoolResolver::check_lambda_metafactory_methodhandle_arg(Constant
return false;
}
// klass and signature of the method (no need to check the method name)
Symbol* sig = cp->method_handle_signature_ref_at(mh_index);
Symbol* klass_name = cp->klass_name_at(cp->method_handle_klass_index_at(mh_index));
if (log_is_enabled(Debug, aot, resolve)) {
ResourceMark rm;
log_debug(aot, resolve)("Checking MethodType of MethodHandle for LambdaMetafactory BSM arg %d: %s", arg_i, sig->as_C_string());
}
{
Klass* k = find_loaded_class(Thread::current(), cp->pool_holder()->class_loader(), klass_name);
if (k == nullptr) {
// Dumping AOT cache: all classes should have been loaded by FinalImageRecipes::load_all_classes(). k must have
// been a class that was excluded when FinalImageRecipes recorded all classes at the end of the training run.
//
// Dumping static CDS archive: all classes in the classlist have already been loaded, before we resolve
// constants. k must have been a class that was excluded when the classlist was written
// at the end of the training run.
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is not loaded", klass_name->as_C_string());
}
return false;
}
if (SystemDictionaryShared::should_be_excluded(k)) {
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name());
}
return false;
}
// cp->pool_holder() must be able to resolve k in production run
precond(CDSConfig::is_dumping_aot_linked_classes());
precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data()));
precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data()));
}
return check_methodtype_signature(cp, sig);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,10 @@ class AOTConstantPoolResolver : AllStatic {
static void maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m, Bytecodes::Code bc, int raw_index,
GrowableArray<bool>* resolve_fmi_list, TRAPS);
static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr);
public:
static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr, bool is_invokehandle = false);
private:
static bool check_lambda_metafactory_signature(ConstantPool* cp, Symbol* sig);
static bool check_lambda_metafactory_methodtype_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i);
static bool check_lambda_metafactory_methodhandle_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -116,11 +116,24 @@ void AOTLinkedClassBulkLoader::preload_classes_in_table(Array<InstanceKlass*>* c
}
}
#ifdef ASSERT
// true iff we are inside AOTLinkedClassBulkLoader::link_classes(), when
// we are moving classes into the fully_initialized state before the
// JVM is able to execute any bytecodes.
static bool _is_initializing_classes_early = false;
// Debug-only query: true while link_classes() is proactively moving classes
// into the fully_initialized state, before the JVM can execute any bytecodes
// (see _is_initializing_classes_early above).
bool AOTLinkedClassBulkLoader::is_initializing_classes_early() {
  return _is_initializing_classes_early;
}
#endif
// Some cached heap objects may hold references to methods in aot-linked
// classes (via MemberName). We need to make sure all classes are
// linked before executing any bytecode.
void AOTLinkedClassBulkLoader::link_classes(JavaThread* current) {
DEBUG_ONLY(_is_initializing_classes_early = true);
link_classes_impl(current);
DEBUG_ONLY(_is_initializing_classes_early = false);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
@ -135,6 +148,13 @@ void AOTLinkedClassBulkLoader::link_classes_impl(TRAPS) {
link_classes_in_table(table->boot2(), CHECK);
link_classes_in_table(table->platform(), CHECK);
link_classes_in_table(table->app(), CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot2(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->platform(), /*early_only=*/true, CHECK);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->app(), /*early_only=*/true, CHECK);
log_info(aot, init)("------ finished early class init");
}
void AOTLinkedClassBulkLoader::link_classes_in_table(Array<InstanceKlass*>* classes, TRAPS) {
@ -216,7 +236,7 @@ void AOTLinkedClassBulkLoader::validate_module(Klass* k, const char* category_na
#endif
void AOTLinkedClassBulkLoader::init_javabase_classes(JavaThread* current) {
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), current);
init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), /*early_only=*/false, current);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
@ -246,9 +266,9 @@ void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) {
assert(h_system_loader() != nullptr, "must be");
AOTLinkedClassTable* table = AOTLinkedClassTable::get();
init_classes_for_loader(Handle(), table->boot2(), CHECK);
init_classes_for_loader(h_platform_loader, table->platform(), CHECK);
init_classes_for_loader(h_system_loader, table->app(), CHECK);
init_classes_for_loader(Handle(), table->boot2(), /*early_only=*/false, CHECK);
init_classes_for_loader(h_platform_loader, table->platform(), /*early_only=*/false, CHECK);
init_classes_for_loader(h_system_loader, table->app(), /*early_only=*/false, CHECK);
if (Universe::is_fully_initialized() && VerifyDuringStartup) {
// Make sure we're still in a clean state.
@ -324,22 +344,80 @@ void AOTLinkedClassBulkLoader::initiate_loading(JavaThread* current, const char*
}
}
// Some AOT-linked classes for <class_loader> must be initialized early. This includes
// - classes that were AOT-initialized by AOTClassInitializer
// - the classes of all objects that are reachable from the archived mirrors of
// the AOT-linked classes for <class_loader>.
void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, TRAPS) {
// Can we move ik into fully_initialized state before the JVM is able to execute
// bytecodes?
// Decide whether ik may be moved into the fully_initialized state before the
// JVM is able to execute bytecodes. Returns false whenever initialization
// would require running Java code, directly or via an uninitialized supertype.
static bool is_early_init_possible(InstanceKlass* ik) {
  // Case 1: the class itself needs its runtimeSetup() hook (bytecode) to run.
  if (ik->is_runtime_setup_required()) {
    // Bytecodes need to be executed in order to initialize this class.
    if (log_is_enabled(Debug, aot, init)) {
      ResourceMark rm;
      log_debug(aot, init)("No early init %s: needs runtimeSetup()",
                           ik->external_name());
    }
    return false;
  }

  // Case 2: the superclass chain is not yet initialized.
  InstanceKlass* super_klass = ik->super();
  if (super_klass != nullptr && !super_klass->is_initialized()) {
    // is_runtime_setup_required() == true for a super type
    if (log_is_enabled(Debug, aot, init)) {
      ResourceMark rm;
      log_debug(aot, init)("No early init %s: super type %s not initialized",
                           ik->external_name(), ik->super()->external_name());
    }
    return false;
  }

  // Case 3: a directly implemented interface still needs its <clinit> run.
  Array<InstanceKlass*>* ifaces = ik->local_interfaces();
  const int iface_count = ifaces->length();
  for (int idx = 0; idx < iface_count; idx++) {
    InstanceKlass* iface = ifaces->at(idx);
    if (!iface->is_initialized() && iface->interface_needs_clinit_execution_as_super(/*also_check_supers*/false)) {
      // is_runtime_setup_required() == true for this interface
      if (log_is_enabled(Debug, aot, init)) {
        ResourceMark rm;
        log_debug(aot, init)("No early init %s: interface type %s not initialized",
                             ik->external_name(), iface->external_name());
      }
      return false;
    }
  }

  return true;
}
// Normally, classes are initialized on demand. However, some AOT-linked classes
// for the class_loader must be proactively initialized, including:
// - Classes that have an AOT-initialized mirror (they were AOT-initialized by
// AOTClassInitializer during the assembly phase).
// - The classes of all objects that are reachable from the archived mirrors of
// the AOT-linked classes for the class_loader. These are recorded in the special
// subgraph.
//
// (early_only == true) means that this function is called before the JVM
// is capable of executing Java bytecodes.
// Proactively initialize the AOT-linked classes for class_loader.
// (early_only == true) means we are called before the JVM can execute
// bytecodes: classes whose init requires running Java code are deferred to the
// later (early_only == false) pass, which also initializes the special subgraph.
//
// Note: the original text contained stale pre-refactor lines (an old one-arg
// initialize_with_aot_initialized_mirror(CHECK) call and an unconditional
// subgraph init) that unbalanced the braces and would initialize twice; they
// are removed here.
void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes,
                                                       bool early_only, TRAPS) {
  if (classes != nullptr) {
    for (int i = 0; i < classes->length(); i++) {
      InstanceKlass* ik = classes->at(i);
      assert(ik->class_loader_data() != nullptr, "must be");
      bool do_init = ik->has_aot_initialized_mirror();
      if (do_init && early_only && !is_early_init_possible(ik)) {
        // ik will be proactively initialized later when init_classes_for_loader()
        // is called again with (early_only == false).
        do_init = false;
      }
      if (do_init) {
        ik->initialize_with_aot_initialized_mirror(early_only, CHECK);
      }
    }
  }
  if (!early_only) {
    // The special subgraph is only initialized in the late pass.
    HeapShared::init_classes_for_special_subgraph(class_loader, CHECK);
  }
}
void AOTLinkedClassBulkLoader::replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ class AOTLinkedClassBulkLoader : AllStatic {
static void link_classes_impl(TRAPS);
static void link_classes_in_table(Array<InstanceKlass*>* classes, TRAPS);
static void init_non_javabase_classes_impl(TRAPS);
static void init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, TRAPS);
static void init_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, bool early_only, TRAPS);
static void replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) NOT_CDS_RETURN;
#ifdef ASSERT
@ -73,8 +73,9 @@ public:
static void init_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
static void init_non_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
static void exit_on_exception(JavaThread* current);
static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN;
static bool is_initializing_classes_early() NOT_DEBUG({return false;});
};
#endif // SHARE_CDS_AOTLINKEDCLASSBULKLOADER_HPP

View File

@ -98,8 +98,8 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
DumpRegion* rw_region = &builder->_rw_region;
DumpRegion* ro_region = &builder->_ro_region;
dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs);
dumptime_log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs);
dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs, &builder->_ro_src_objs);
dumptime_log_metaspace_region("ro region", ro_region, &builder->_rw_src_objs, &builder->_ro_src_objs);
address bitmap_end = address(bitmap + bitmap_size_in_bytes);
log_region_range("bitmap", address(bitmap), bitmap_end, nullptr);
@ -122,17 +122,6 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
class AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs : public UniqueMetaspaceClosure {
GrowableArrayCHeap<ArchivedObjInfo, mtClass> _objs;
// Order two archived-object records by their original (source) address.
// Returns a conventional three-way comparison result for use with sort().
static int compare_objs_by_addr(ArchivedObjInfo* a, ArchivedObjInfo* b) {
  if (a->_src_addr == b->_src_addr) {
    return 0;
  }
  return (a->_src_addr < b->_src_addr) ? -1 : 1;
}
public:
GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs() { return &_objs; }
@ -152,7 +141,7 @@ public:
void finish() {
UniqueMetaspaceClosure::finish();
_objs.sort(compare_objs_by_addr);
_objs.sort(compare_by_address);
}
}; // AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs
@ -203,24 +192,47 @@ void AOTMapLogger::runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap<Archived
}
// Log one metaspace dump region (rw or ro): its address range, and — at Debug
// level — every archived object that landed inside it, in address order.
// Both object lists are scanned because objects from either list may end up in
// either region (see the UseCompactObjectHeaders note below).
//
// Note: the original text retained two stale loop lines referencing an
// undeclared 'src_objs' (pre-refactor residue) which unbalanced the braces;
// they are removed here.
void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* region,
                                                 const ArchiveBuilder::SourceObjList* rw_objs,
                                                 const ArchiveBuilder::SourceObjList* ro_objs) {
  address region_base = address(region->base());
  address region_top = address(region->top());
  log_region_range(name, region_base, region_top, region_base + _buffer_to_requested_delta);

  if (log_is_enabled(Debug, aot, map)) {
    GrowableArrayCHeap<ArchivedObjInfo, mtClass> objs;
    // With -XX:+UseCompactObjectHeaders, it's possible for small objects (including some from
    // ro_objs) to be allocated in the gaps in the RW region.
    collect_metaspace_objs(&objs, region_base, region_top, rw_objs);
    collect_metaspace_objs(&objs, region_base, region_top, ro_objs);
    objs.sort(compare_by_address);
    log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length());
  }
}
// Append to 'objs' an ArchivedObjInfo for every entry of 'src_objs' whose
// buffered address lies inside [region_base, region_top).
// Fixes: removed a redundant duplicate assignment to info._buffered_addr
// (pre-refactor residue assigned the same value twice) and a stray space in
// the parameter list.
void AOTMapLogger::collect_metaspace_objs(GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs,
                                          address region_base, address region_top,
                                          const ArchiveBuilder::SourceObjList* src_objs) {
  for (int i = 0; i < src_objs->objs()->length(); i++) {
    ArchiveBuilder::SourceObjInfo* src_info = src_objs->at(i);
    address buf_addr = src_info->buffered_addr();
    if (region_base <= buf_addr && buf_addr < region_top) {
      ArchivedObjInfo info;
      info._src_addr = src_info->source_addr();
      info._buffered_addr = buf_addr;
      // The requested address is where this object will live when the archive is mapped.
      info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
      info._bytes = src_info->size_in_bytes();
      info._type = src_info->type();
      objs->append(info);
    }
  }
}
log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length());
// Three-way comparison of two archived-object records by buffered address,
// suitable as a sort() comparator.
int AOTMapLogger::compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b) {
  const address lhs = a->_buffered_addr;
  const address rhs = b->_buffered_addr;
  if (lhs == rhs) {
    return 0;
  }
  return (lhs < rhs) ? -1 : 1;
}

View File

@ -127,7 +127,12 @@ private:
static void runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs);
static void runtime_log_metaspace_regions(FileMapInfo* mapinfo, GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs);
static void dumptime_log_metaspace_region(const char* name, DumpRegion* region,
const ArchiveBuilder::SourceObjList* src_objs);
const ArchiveBuilder::SourceObjList* rw_objs,
const ArchiveBuilder::SourceObjList* ro_objs);
static void collect_metaspace_objs(GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs,
address region_base, address region_top ,
const ArchiveBuilder::SourceObjList* src_objs);
static int compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b);
// Common code for dumptime/runtime
static void log_file_header(FileMapInfo* mapinfo);

View File

@ -64,6 +64,11 @@ HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;
GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;
@ -71,8 +76,6 @@ GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedH
AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
@ -87,7 +90,6 @@ void AOTMappedHeapWriter::init() {
Universe::heap()->collect(GCCause::_java_lang_system_gc);
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new (mtClassShared) FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;
@ -141,9 +143,6 @@ int AOTMappedHeapWriter::narrow_oop_shift() {
void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
delete _source_objs;
_source_objs = nullptr;
delete _dumped_interned_strings;
_dumped_interned_strings = nullptr;
}
void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
@ -181,25 +180,6 @@ bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
}
}
// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
assert(!is_string_too_large_to_archive(string), "must be");
bool created;
_dumped_interned_strings->put_if_absent(string, true, &created);
if (created) {
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(string);
_dumped_interned_strings->maybe_grow();
}
}
bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
return _dumped_interned_strings->get(o) != nullptr;
}
// Various lookup functions between source_obj, buffered_obj and requested_obj
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
@ -430,6 +410,7 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
assert(info != nullptr, "must be");
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);
assert(buffer_offset <= 0x7fffffff, "sanity");
OopHandle handle(Universe::vm_global(), src_obj);
_buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
@ -442,6 +423,9 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
_buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
log_info(aot)(" packages = %8zu", _num_packages);
log_info(aot)(" protection domains = %8zu", _num_protection_domains);
}
size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
@ -530,7 +514,25 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu
*field_addr = value;
}
// Accumulate per-category statistics for one source heap object about to be
// copied into the archive buffer (reported by copy_source_objs_to_buffer()).
void AOTMappedHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    // Account for both the String object itself and its backing value array.
    _num_strings++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
    return;
  }
  Symbol* name = src_obj->klass()->name();
  if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
    _num_packages++;
  } else if (name->equals("java/security/ProtectionDomain")) {
    _num_protection_domains++;
  }
}
size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
update_stats(src_obj);
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
@ -896,8 +898,14 @@ void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) {
native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
}
guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
"Metadata %p should have been archived", native_ptr);
if (!ArchiveBuilder::current()->has_been_archived((address)native_ptr)) {
ResourceMark rm;
LogStreamHandle(Error, aot) log;
log.print("Marking native pointer for oop %p (type = %s, offset = %d)",
cast_from_oop<void*>(src_obj), src_obj->klass()->external_name(), field_offset);
src_obj->print_on(&log);
fatal("Metadata %p should have been archived", native_ptr);
}
address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);

Some files were not shown because too many files have changed in this diff Show More