Merge branch 'master' of https://github.com/openjdk/jdk into JDK-8256951-reuse-cmp-cc

Ashay Rane 2026-04-20 09:43:02 -05:00
commit 2a9cecb5ef
2061 changed files with 53904 additions and 34779 deletions

View File

@ -37,13 +37,13 @@ runs:
- name: 'Check cache for already built JTReg'
id: get-cached
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: jtreg/installed
key: jtreg-${{ steps.version.outputs.value }}
- name: 'Checkout the JTReg source'
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
repository: openjdk/jtreg
ref: jtreg-${{ steps.version.outputs.value }}
@ -61,7 +61,7 @@ runs:
if: (steps.get-cached.outputs.cache-hit != 'true')
- name: 'Upload JTReg artifact'
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: bundles-jtreg-${{ steps.version.outputs.value }}
path: jtreg/installed

View File

@ -66,7 +66,7 @@ runs:
shell: bash
- name: 'Upload build logs'
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: failure-logs-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: failure-logs
@ -74,7 +74,7 @@ runs:
# This is the best way I found to abort the job with an error message
- name: 'Notify about build failures'
uses: actions/github-script@v7
uses: actions/github-script@v8
with:
script: core.setFailed('Build failed. See summary for details.')
if: steps.check.outputs.failure == 'true'

View File

@ -65,7 +65,7 @@ runs:
- name: 'Check cache for BootJDK'
id: get-cached-bootjdk
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: bootjdk/jdk
key: boot-jdk-${{ inputs.platform }}-${{ steps.sha256.outputs.value }}

View File

@ -54,14 +54,14 @@ runs:
steps:
- name: 'Download bundles artifact'
id: download-bundles
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: bundles
continue-on-error: true
- name: 'Download bundles artifact (retry)'
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}
path: bundles
@ -69,7 +69,7 @@ runs:
- name: 'Download static bundles artifact'
id: download-static-bundles
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}${{ inputs.static-suffix }}
path: bundles

View File

@ -40,7 +40,7 @@ runs:
var: GTEST_VERSION
- name: 'Checkout GTest source'
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
repository: google/googletest
ref: 'v${{ steps.version.outputs.value }}'

View File

@ -41,7 +41,7 @@ runs:
- name: 'Download JTReg artifact'
id: download-jtreg
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: bundles-jtreg-${{ steps.version.outputs.value }}
path: jtreg/installed

View File

@ -31,7 +31,7 @@ runs:
steps:
- name: 'Install MSYS2'
id: msys2
uses: msys2/setup-msys2@v2.28.0
uses: msys2/setup-msys2@v2.31.0
with:
install: 'autoconf tar unzip zip make'
path-type: minimal

View File

@ -87,7 +87,7 @@ runs:
shell: bash
- name: 'Upload bundles artifact'
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: bundles-${{ inputs.platform }}${{ inputs.debug-suffix }}${{ inputs.static-suffix }}${{ inputs.bundle-suffix }}
path: bundles

5 .github/pull_request_template.md vendored Normal file
View File

@ -0,0 +1,5 @@
---------
- [ ] I confirm that I make this contribution in accordance with the [OpenJDK Interim AI Policy](https://openjdk.org/legal/ai).

View File

@ -74,7 +74,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Install toolchain and dependencies'
run: |

View File

@ -94,7 +94,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Get the BootJDK'
id: bootjdk
@ -122,7 +122,7 @@ jobs:
- name: 'Check cache for sysroot'
id: get-cached-sysroot
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: sysroot
key: sysroot-${{ matrix.debian-arch }}-${{ hashFiles('./.github/workflows/build-cross-compile.yml') }}

View File

@ -84,7 +84,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Get the BootJDK'
id: bootjdk

View File

@ -75,7 +75,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Get the BootJDK'
id: bootjdk

View File

@ -83,7 +83,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Get MSYS2'
uses: ./.github/actions/get-msys2

View File

@ -75,7 +75,7 @@ jobs:
steps:
- name: 'Checkout the scripts'
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
sparse-checkout: |
.github

View File

@ -128,7 +128,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
uses: actions/checkout@v4
uses: actions/checkout@v6
- name: 'Get MSYS2'
uses: ./.github/actions/get-msys2
@ -239,7 +239,7 @@ jobs:
if: always()
- name: 'Upload test results'
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
path: results
name: ${{ steps.package.outputs.artifact-name }}
@ -247,7 +247,7 @@ jobs:
# This is the best way I found to abort the job with an error message
- name: 'Notify about test failures'
uses: actions/github-script@v7
uses: actions/github-script@v8
with:
script: core.setFailed('${{ steps.run-tests.outputs.error-message }}')
if: steps.run-tests.outputs.failure == 'true'

2 .gitignore vendored
View File

@ -24,8 +24,6 @@ NashornProfile.txt
/.gdbinit
/.lldbinit
**/core.[0-9]*
*.rej
*.orig
test/benchmarks/**/target
/src/hotspot/CMakeLists.txt
/src/hotspot/compile_commands.json

View File

@ -187,14 +187,18 @@ fi
SOURCE_PREFIX="<sourceFolder url=\"file://"
SOURCE_POSTFIX="\" isTestSource=\"false\" />"
# SOURCES is a single string containing embedded newlines.
for root in $MODULE_ROOTS; do
if [ "x$CYGPATH" != "x" ]; then
root=`$CYGPATH -am $root`
elif [ "x$WSL_DISTRO_NAME" != "x" ]; then
root=`wslpath -am $root`
fi
SOURCES=$SOURCES" $SOURCE_PREFIX""$root""$SOURCE_POSTFIX"
# Add line termination/indentation for everything after the first entry.
if [ "x$SOURCES" != "x" ]; then
SOURCES="${SOURCES}\n "
fi
SOURCES="${SOURCES}${SOURCE_PREFIX}${root}${SOURCE_POSTFIX}"
done
add_replacement "###SOURCE_ROOTS###" "$SOURCES"

View File

@ -284,9 +284,10 @@ possible, or if you want to use a fully qualified test descriptor, add
<h3 id="gtest">Gtest</h3>
<p><strong>Note:</strong> To be able to run the Gtest suite, you need to
configure your build to be able to find a proper version of the gtest
source. For details, see the section <a
href="building.html#running-tests">"Running Tests" in the build
documentation</a>.</p>
source. For details, see the section <strong>"Running Tests" in the
build documentation</strong> (<a
href="building.html#running-tests">html</a>, <a
href="building.md#running-tests">markdown</a>).</p>
<p>Since the Hotspot Gtest suite is so quick, the default is to run all
tests. This is specified by just <code>gtest</code>, or as a fully
qualified test descriptor <code>gtest:all</code>.</p>

View File

@ -198,8 +198,8 @@ use a fully qualified test descriptor, add `jtreg:`, e.g.
**Note:** To be able to run the Gtest suite, you need to configure your build
to be able to find a proper version of the gtest source. For details, see the
section ["Running Tests" in the build
documentation](building.html#running-tests).
section **"Running Tests" in the build
documentation** ([html](building.html#running-tests), [markdown](building.md#running-tests)).
Since the Hotspot Gtest suite is so quick, the default is to run all tests.
This is specified by just `gtest`, or as a fully qualified test descriptor

View File

@ -1,4 +1,4 @@
# Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -93,19 +93,16 @@ JAVADOC_DISABLED_DOCLINT_WARNINGS := missing
JAVADOC_DISABLED_DOCLINT_PACKAGES := org.w3c.* javax.smartcardio
# The initial set of options for javadoc
# -XDaccessInternalAPI is a temporary workaround, see 8373909
JAVADOC_OPTIONS := -use -keywords -notimestamp \
-serialwarn -encoding utf-8 -docencoding utf-8 -breakiterator \
-splitIndex --system none -javafx --expand-requires transitive \
--override-methods=summary \
-XDaccessInternalAPI
--override-methods=summary
# The reference options must stay stable to allow for comparisons across the
# development cycle.
REFERENCE_OPTIONS := -XDignore.symbol.file=true -use -keywords -notimestamp \
-serialwarn -encoding utf-8 -breakiterator -splitIndex --system none \
-html5 -javafx --expand-requires transitive \
-XDaccessInternalAPI
-html5 -javafx --expand-requires transitive
# Should we add DRAFT stamps to the generated javadoc?
ifeq ($(VERSION_IS_GA), true)

View File

@ -44,6 +44,9 @@ ifeq ($(HSDIS_BACKEND), capstone)
else ifeq ($(call isTargetCpuArch, aarch64), true)
CAPSTONE_ARCH := CS_ARCH_$(CAPSTONE_ARCH_AARCH64_NAME)
CAPSTONE_MODE := CS_MODE_ARM
else ifeq ($(call isTargetCpuArch, arm), true)
CAPSTONE_ARCH := CS_ARCH_ARM
CAPSTONE_MODE := CS_MODE_ARM
else
$(error No support for Capstone on this platform)
endif

View File

@ -1020,6 +1020,9 @@ define SetupRunJtregTestBody
VM_OPTIONS := $$(JTREG_ALL_OPTIONS) ))
$$(call LogWarn, AOT_JDK_CACHE=$$($1_AOT_JDK_CACHE))
$1_JTREG_BASIC_OPTIONS += -vmoption:-XX:AOTCache="$$($1_AOT_JDK_CACHE)"
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-AotJdk.txt) \
))
endif

View File

@ -544,12 +544,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector"
TOOLCHAIN_CFLAGS_JDK="-fvisibility=hidden -pipe -fstack-protector"
# reduce lib size on linux in link step, this needs also special compile flags
# do this on s390x also for libjvm (where serviceability agent is not supported)
if test "x$ENABLE_LINKTIME_GC" = xtrue; then
TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -ffunction-sections -fdata-sections"
if test "x$OPENJDK_TARGET_CPU" = xs390x && test "x$DEBUG_LEVEL" == xrelease; then
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections"
fi
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections"
fi
# technically NOT for CXX (but since this gives *worse* performance, use
# no-strict-aliasing everywhere!)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -53,16 +53,15 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# add --icf=all (Identical Code Folding — merges identical functions)
BASIC_LDFLAGS="-Wl,-z,defs -Wl,-z,relro -Wl,-z,now -Wl,--no-as-needed -Wl,--exclude-libs,ALL"
BASIC_LDFLAGS_JVM_ONLY=""
# Linux : remove unused code+data in link step
if test "x$ENABLE_LINKTIME_GC" = xtrue; then
if test "x$OPENJDK_TARGET_CPU" = xs390x; then
BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--gc-sections"
else
BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections"
fi
# keep vtables : -Wl,--undefined-glob=_ZTV* (but this seems not to work with gold ld)
# so keep at least the Metadata vtable that is used in the serviceability agent
BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,--gc-sections -Wl,--undefined=_ZTV8Metadata"
BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections"
fi
BASIC_LDFLAGS_JVM_ONLY=""
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing $DEBUG_PREFIX_CFLAGS"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"

View File

@ -217,10 +217,12 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE],
TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION],
[$PROGRAMFILES_X86/$VS_INSTALL_DIR], [well-known name])
fi
# Derive system drive root from CMD (which is at <drive>/windows/system32/cmd.exe)
WINSYSDRIVE_ROOT="$(dirname "$(dirname "$(dirname "$CMD")")")"
TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION],
[c:/program files/$VS_INSTALL_DIR], [well-known name])
[$WINSYSDRIVE_ROOT/program files/$VS_INSTALL_DIR], [well-known name])
TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([$TARGET_CPU], [$VS_VERSION],
[c:/program files (x86)/$VS_INSTALL_DIR], [well-known name])
[$WINSYSDRIVE_ROOT/program files (x86)/$VS_INSTALL_DIR], [well-known name])
if test "x$SDK_INSTALL_DIR" != x; then
if test "x$ProgramW6432" != x; then
TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION],
@ -235,9 +237,9 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO_BAT_FILE],
[$PROGRAMFILES/$SDK_INSTALL_DIR], [well-known name])
fi
TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION],
[c:/program files/$SDK_INSTALL_DIR], [well-known name])
[$WINSYSDRIVE_ROOT/program files/$SDK_INSTALL_DIR], [well-known name])
TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT([$TARGET_CPU], [$VS_VERSION],
[c:/program files (x86)/$SDK_INSTALL_DIR], [well-known name])
[$WINSYSDRIVE_ROOT/program files (x86)/$SDK_INSTALL_DIR], [well-known name])
fi
VCVARS_VER=auto
@ -338,7 +340,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_VISUAL_STUDIO_ENV],
OLDPATH="$PATH"
# Make sure we only capture additions to PATH needed by VS.
# Clear out path, but need system dir present for vsvars cmd file to be able to run
export PATH=$WINENV_PREFIX/c/windows/system32
export PATH="$(dirname "$CMD")"
# The "| cat" is to stop SetEnv.Cmd to mess with system colors on some systems
# We can't pass -vcvars_ver=$VCVARS_VER here because cmd.exe eats all '='
# in bat file arguments. :-(
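As a side note on the WINSYSDRIVE_ROOT derivation above: stripping three path components from CMD (cmd.exe, then system32, then windows) leaves the drive root that the Visual Studio and Windows SDK probes are built on. A minimal Java sketch of the same idea, using a hypothetical Cygwin-style value for $CMD (the script itself does this with nested dirname calls):

import java.nio.file.Path;

class WinSysDriveRootDemo {
    public static void main(String[] args) {
        // Hypothetical Cygwin-style location of cmd.exe; the real value comes
        // from the build environment, not from this constant.
        Path cmd = Path.of("/cygdrive/c/windows/system32/cmd.exe");
        // Three getParent() calls mirror the three nested dirname calls:
        // strip cmd.exe, then system32, then windows, leaving the drive root.
        Path sysDriveRoot = cmd.getParent().getParent().getParent();
        System.out.println(sysDriveRoot); // /cygdrive/c (separators vary by platform)
    }
}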

View File

@ -36,16 +36,16 @@ include $(TOPDIR)/make/ToolsJdk.gmk
LAUNCHER_SRC := $(TOPDIR)/src/java.base/share/native/launcher
ifeq ($(call isTargetOs, aix), true)
ADD_PLATFORM_INCLUDE_DIR := -I$(TOPDIR)/src/java.base/aix/native/include
endif
LAUNCHER_CFLAGS += -I$(TOPDIR)/src/java.base/share/native/launcher \
-I$(TOPDIR)/src/java.base/share/native/libjli \
$(ADD_PLATFORM_INCLUDE_DIR) \
-I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjli \
-I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjli \
#
ifeq ($(call isTargetOs, aix), true)
LAUNCHER_CFLAGS += -I$(TOPDIR)/src/java.base/aix/native/include
endif
MACOSX_PLIST_DIR := $(TOPDIR)/src/java.base/macosx/native/launcher
JAVA_MANIFEST := $(TOPDIR)/src/java.base/windows/native/launcher/java.manifest

View File

@ -29,21 +29,21 @@ GTEST_VERSION=1.14.0
JTREG_VERSION=8.2.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz
LINUX_X64_BOOT_JDK_SHA256=59cdcaf255add4721de38eb411d4ecfe779356b61fb671aee63c7dec78054c2b
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_linux-x64_bin.tar.gz
LINUX_X64_BOOT_JDK_SHA256=83c78367f8c81257beef72aca4bbbf8e6dac8ca2b3a4546a85879a09e6e4e128
ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin25-binaries/releases/download/jdk-25%2B36/OpenJDK25U-jdk_x64_alpine-linux_hotspot_25_36.tar.gz
ALPINE_LINUX_X64_BOOT_JDK_SHA256=637e47474d411ed86134f413af7d5fef4180ddb0bf556347b7e74a88cf8904c8
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin26-binaries/releases/download/jdk-26%2B35/OpenJDK26U-jdk_x64_alpine-linux_hotspot_26_35.tar.gz
ALPINE_LINUX_X64_BOOT_JDK_SHA256=c105e581fdccb4e7120d889235d1ad8d5b2bed0af4972bc881e0a8ba687c94a4
MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-aarch64_bin.tar.gz
MACOS_AARCH64_BOOT_JDK_SHA256=2006337bf326fdfdf6117081751ba38c1c8706d63419ecac7ff102ff7c776876
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-aarch64_bin.tar.gz
MACOS_AARCH64_BOOT_JDK_SHA256=254586bcd1bf6dcd125ad667ac32562cb1e2ab1abf3a61fb117b6fabb571e765
MACOS_X64_BOOT_JDK_EXT=tar.gz
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-x64_bin.tar.gz
MACOS_X64_BOOT_JDK_SHA256=47482ad9888991ecac9b2bcc131e2b53ff78aff275104cef85f66252308e8a09
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-x64_bin.tar.gz
MACOS_X64_BOOT_JDK_SHA256=8642b89d889c14ede2c446fd5bbe3621c8a3082e3df02013fd1658e39f52929a
WINDOWS_X64_BOOT_JDK_EXT=zip
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_windows-x64_bin.zip
WINDOWS_X64_BOOT_JDK_SHA256=85bcc178461e2cb3c549ab9ca9dfa73afd54c09a175d6510d0884071867137d3
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_windows-x64_bin.zip
WINDOWS_X64_BOOT_JDK_SHA256=2dd2d92c9374cd49a120fe9d916732840bf6bb9f0e0cc29794917a3c08b99c5f

View File

@ -387,8 +387,8 @@ var getJibProfilesCommon = function (input, data) {
};
};
common.boot_jdk_version = "25";
common.boot_jdk_build_number = "37";
common.boot_jdk_version = "26";
common.boot_jdk_build_number = "35";
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
+ common.boot_jdk_version
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2026-09-15
DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27"
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="26 27"
DEFAULT_JDK_SOURCE_TARGET_VERSION=27
DEFAULT_PROMOTED_VERSION_PRE=ea

View File

@ -87,6 +87,7 @@ public class CLDRConverter {
static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";
static final String ZONE_NAME_PREFIX = "timezone.displayname.";
static final String METAZONE_ID_PREFIX = "metazone.id.";
static final String METAZONE_DSTOFFSET_PREFIX = "metazone.dstoffset.";
static final String PARENT_LOCALE_PREFIX = "parentLocale.";
static final String LIKELY_SCRIPT_PREFIX = "likelyScript.";
static final String META_EMPTY_ZONE_NAME = "EMPTY_ZONE";
@ -139,6 +140,11 @@ public class CLDRConverter {
private static final Map<String, String> tzdbSubstLetters = HashMap.newHashMap(512);
private static final Map<String, String> tzdbLinks = HashMap.newHashMap(512);
// Map of explicit dst offsets for metazones
// key: time zone ID
// value: explicit dstOffset for the corresponding metazone name
static final Map<String, String> explicitDstOffsets = HashMap.newHashMap(32);
static enum DraftType {
UNCONFIRMED,
PROVISIONAL,
@ -795,10 +801,7 @@ public class CLDRConverter {
String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
.orElse(tzid);
// Follow link, if needed
String tzLink = null;
for (var k = tzKey; tzdbLinks.containsKey(k);) {
k = tzLink = tzdbLinks.get(k);
}
String tzLink = getTZDBLink(tzKey);
if (tzLink == null && tzdbLinks.containsValue(tzKey)) {
// reverse link search
// this is needed as in tzdb, "America/Buenos_Aires" links to
@ -827,7 +830,7 @@ public class CLDRConverter {
} else {
// TZDB short names
tznames = Arrays.copyOf(tznames, tznames.length);
fillTZDBShortNames(tzid, tznames);
fillTZDBShortNames(tzKey, tznames);
names.put(tzid, tznames);
}
} else {
@ -840,11 +843,13 @@ public class CLDRConverter {
String metaKey = METAZONE_ID_PREFIX + meta;
data = map.get(metaKey);
if (data instanceof String[] tznames) {
// TZDB short names
tznames = Arrays.copyOf((String[])names.getOrDefault(metaKey, tznames), 6);
fillTZDBShortNames(tzid, tznames);
// Keep the metazone prefix here.
names.putIfAbsent(metaKey, tznames);
if (isDefaultZone(meta, tzKey)) {
// Record the metazone names only from the default
// (001) zone, with short names filled from TZDB
tznames = Arrays.copyOf(tznames, tznames.length);
fillTZDBShortNames(tzKey, tznames);
names.put(metaKey, tznames);
}
names.put(tzid, meta);
if (tzLink != null && availableIds.contains(tzLink)) {
names.put(tzLink, meta);
@ -867,6 +872,12 @@ public class CLDRConverter {
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
names.putAll(exCities);
// Explicit metazone offsets
if (id.equals("root")) {
explicitDstOffsets.forEach((k, v) ->
names.put(METAZONE_DSTOFFSET_PREFIX + k, v));
}
// If there's no UTC entry at this point, add an empty one
if (!names.isEmpty() && !names.containsKey("UTC")) {
names.putIfAbsent(METAZONE_ID_PREFIX + META_EMPTY_ZONE_NAME, EMPTY_ZONE);
@ -1492,12 +1503,12 @@ public class CLDRConverter {
* Fill the TZDB short names if there is no name provided by the CLDR
*/
private static void fillTZDBShortNames(String tzid, String[] names) {
var val = tzdbShortNamesMap.get(tzdbLinks.getOrDefault(tzid, tzid));
var val = tzdbShortNamesMap.getOrDefault(tzid, tzdbShortNamesMap.get(getTZDBLink(tzid)));
if (val != null) {
var format = val.split(NBSP)[0];
var rule = val.split(NBSP)[1];
IntStream.of(1, 3, 5).forEach(i -> {
if (names[i] == null) {
if (names[i] == null || names[i].isEmpty()) {
if (format.contains("%s")) {
names[i] = switch (i) {
case 1 -> format.formatted(tzdbSubstLetters.get(rule + NBSP + STD));
@ -1519,6 +1530,21 @@ public class CLDRConverter {
}
}
private static boolean isDefaultZone(String meta, String tzid) {
String zone001 = handlerMetaZones.zidMap().get(meta);
var tzLink = getTZDBLink(tzid);
return canonicalTZMap.getOrDefault(tzid, tzid).equals(zone001) ||
tzLink != null && canonicalTZMap.getOrDefault(tzLink, tzLink).equals(zone001);
}
private static String getTZDBLink(String tzid) {
String tzLink = null;
for (var k = tzid; tzdbLinks.containsKey(k);) {
k = tzLink = tzdbLinks.get(k);
}
return tzLink;
}
/*
* Convert TZDB offsets to JDK's offsets, eg, "-08" to "GMT-08:00".
* If it cannot recognize the pattern, return the argument as is.
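For context on the new getTZDBLink() helper shown above: it simply follows tzdbLinks entries until it reaches a zone that is not itself a link, and returns null if the id never appears as a link. A self-contained sketch of that chain walk on a hand-built map ("US/Pacific" really is a tzdb backward link to "America/Los_Angeles"; "Example/Alias" is an invented extra hop used only to show chaining):

import java.util.Map;

class TzdbLinkDemo {
    // Illustrative link map; the real map is populated from the tzdb data.
    static final Map<String, String> LINKS = Map.of(
        "Example/Alias", "US/Pacific",
        "US/Pacific", "America/Los_Angeles");

    // Same shape as CLDRConverter.getTZDBLink(): follow links to the end of
    // the chain, returning null when the id is not a link at all.
    static String getLink(String tzid) {
        String target = null;
        for (String k = tzid; LINKS.containsKey(k); ) {
            k = target = LINKS.get(k);
        }
        return target;
    }

    public static void main(String[] args) {
        System.out.println(getLink("Example/Alias"));        // America/Los_Angeles
        System.out.println(getLink("America/Los_Angeles"));  // null (not a link)
    }
}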

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,15 @@ class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
if (fromLDT.isBefore(now) && toLDT.isAfter(now)) {
metazone = attributes.getValue("mzone");
// Explicit metazone DST offsets. Only the "dst" offset is needed,
// as "std" is used by default when it doesn't match.
String dstOffset = attributes.getValue("dstOffset");
if (dstOffset != null) {
CLDRConverter.explicitDstOffsets.put(tzid, dstOffset);
}
}
pushIgnoredContainer(qName);
break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -198,7 +198,8 @@ class ResourceBundleGenerator implements BundleGenerator {
} else if (value instanceof String) {
String valStr = (String)value;
if (type == BundleType.TIMEZONE &&
!key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
!(key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
key.startsWith(CLDRConverter.METAZONE_DSTOFFSET_PREFIX)) ||
valStr.startsWith(META_VALUE_PREFIX)) {
out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava));
} else {

View File

@ -25,13 +25,13 @@
package build.tools.taglet;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.lang.reflect.Field;
import javax.lang.model.element.Element;
@ -141,6 +141,11 @@ public class JSpec implements Taglet {
@Override
public String toString(List<? extends DocTree> tags, Element elem) {
throw new UnsupportedOperationException();
}
// @Override - requires JDK-8373922 in build JDK
public String toString(List<? extends DocTree> tags, Element elem, URI docRoot) {
if (tags.isEmpty())
return "";
@ -177,7 +182,7 @@ public class JSpec implements Taglet {
String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
String rootParent = docRoot.resolve("..").toString();
String url = preview == null ?
String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
@ -230,23 +235,6 @@ public class JSpec implements Taglet {
return sb.toString();
}
private static ThreadLocal<String> CURRENT_PATH = null;
private String currentPath() {
if (CURRENT_PATH == null) {
try {
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
.getField("CURRENT_PATH");
@SuppressWarnings("unchecked")
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
CURRENT_PATH = tl;
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Cannot determine current path", e);
}
}
return CURRENT_PATH.get();
}
private String expand(List<? extends DocTree> trees) {
return (new SimpleDocTreeVisitor<StringBuilder, StringBuilder>() {
public StringBuilder defaultAction(DocTree tree, StringBuilder sb) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,9 @@ import jdk.javadoc.doclet.Taglet;
import javax.lang.model.element.*;
import javax.lang.model.type.DeclaredType;
import javax.lang.model.util.Elements;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
@ -78,6 +80,11 @@ public final class SealedGraph implements Taglet {
@Override
public String toString(List<? extends DocTree> tags, Element element) {
throw new UnsupportedOperationException();
}
// @Override - requires JDK-8373922 in build JDK
public String toString(List<? extends DocTree> tags, Element element, URI docRoot) {
if (sealedDotOutputDir == null || sealedDotOutputDir.isEmpty()) {
return "";
}
@ -85,9 +92,15 @@ public final class SealedGraph implements Taglet {
return "";
}
ModuleElement module = docletEnvironment.getElementUtils().getModuleOf(element);
Elements util = docletEnvironment.getElementUtils();
ModuleElement module = util.getModuleOf(element);
// '.' in .DOT file name is converted to '/' in .SVG path, so we use '-' as separator for nested classes.
// module_package.subpackage.Outer-Inner.dot => module/package/subpackage/Outer-Inner-sealed-graph.svg
Path dotFile = Path.of(sealedDotOutputDir,
module.getQualifiedName() + "_" + typeElement.getQualifiedName() + ".dot");
module.getQualifiedName() + "_"
+ util.getPackageOf(element).getQualifiedName() + "."
+ packagelessCanonicalName(typeElement).replace(".", "-") + ".dot");
Set<String> exports = module.getDirectives().stream()
.filter(ModuleElement.ExportsDirective.class::isInstance)
@ -99,7 +112,7 @@ public final class SealedGraph implements Taglet {
.map(Objects::toString)
.collect(Collectors.toUnmodifiableSet());
String dotContent = new Renderer().graph(typeElement, exports);
String dotContent = new Renderer().graph(typeElement, exports, docRoot);
try {
Files.writeString(dotFile, dotContent, WRITE, CREATE, TRUNCATE_EXISTING);
@ -107,8 +120,8 @@ public final class SealedGraph implements Taglet {
throw new RuntimeException(e);
}
String simpleTypeName = packagelessCanonicalName(typeElement).replace('.', '/');
String imageFile = simpleTypeName + "-sealed-graph.svg";
String simpleTypeName = packagelessCanonicalName(typeElement);
String imageFile = simpleTypeName.replace(".", "-") + "-sealed-graph.svg";
int thumbnailHeight = 100; // also appears in the stylesheet
String hoverImage = "<span>"
+ getImage(simpleTypeName, imageFile, -1, true)
@ -137,21 +150,26 @@ public final class SealedGraph implements Taglet {
private final class Renderer {
// Generates a graph in DOT format
String graph(TypeElement rootClass, Set<String> exports) {
final State state = new State(rootClass);
String graph(TypeElement rootClass, Set<String> exports, URI pathToRoot) {
if (!isInPublicApi(rootClass, exports)) {
// Alternatively we can return "" for the graph since there is no single root to render
throw new IllegalArgumentException("Root not in public API: " + rootClass.getQualifiedName());
}
final State state = new State(pathToRoot);
traverse(state, rootClass, exports);
return state.render();
}
static void traverse(State state, TypeElement node, Set<String> exports) {
if (!isInPublicApi(node, exports)) {
throw new IllegalArgumentException("Bad request, not in public API: " + node.getQualifiedName());
}
state.addNode(node);
if (!(node.getModifiers().contains(Modifier.SEALED) || node.getModifiers().contains(Modifier.FINAL))) {
state.addNonSealedEdge(node);
} else {
for (TypeElement subNode : permittedSubclasses(node, exports)) {
if (isInPublicApi(node, exports) && isInPublicApi(subNode, exports)) {
state.addEdge(node, subNode);
}
state.addEdge(node, subNode);
traverse(state, subNode, exports);
}
}
@ -163,7 +181,7 @@ public final class SealedGraph implements Taglet {
private static final String TOOLTIP = "tooltip";
private static final String LINK = "href";
private final TypeElement rootNode;
private final URI pathToRoot;
private final StringBuilder builder;
@ -188,8 +206,8 @@ public final class SealedGraph implements Taglet {
}
}
public State(TypeElement rootNode) {
this.rootNode = rootNode;
public State(URI pathToRoot) {
this.pathToRoot = pathToRoot;
nodeStyleMap = new LinkedHashMap<>();
builder = new StringBuilder()
.append("digraph G {")
@ -212,24 +230,15 @@ public final class SealedGraph implements Taglet {
var styles = nodeStyleMap.computeIfAbsent(id(node), n -> new LinkedHashMap<>());
styles.put(LABEL, new StyleItem.PlainString(node.getSimpleName().toString()));
styles.put(TOOLTIP, new StyleItem.PlainString(node.getQualifiedName().toString()));
styles.put(LINK, new StyleItem.PlainString(relativeLink(node)));
styles.put(LINK, new StyleItem.PlainString(pathToRoot.resolve(relativeLink(node)).toString()));
}
// A permitted class must be in the same package or in the same module.
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
var nodePackage = util.getPackageOf(node);
// Note: SVG files for nested types use the simple names of containing types as parent directories.
// We therefore need to convert all dots in the qualified name to "../" below.
var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
.collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");
var path = util.getModuleOf(node).getQualifiedName().toString() + "/"
+ util.getPackageOf(node).getQualifiedName().toString().replace(".", "/");
return backNavigator + forwardNavigator + "/" + packagelessCanonicalName(node) + ".html";
return path + "/" + packagelessCanonicalName(node) + ".html";
}
public void addEdge(TypeElement node, TypeElement subNode) {
@ -281,25 +290,33 @@ public final class SealedGraph implements Taglet {
private String quotedId(TypeElement node) {
return "\"" + id(node) + "\"";
}
private String simpleName(String name) {
int lastDot = name.lastIndexOf('.');
return lastDot < 0
? name
: name.substring(lastDot);
}
}
private static List<TypeElement> permittedSubclasses(TypeElement node, Set<String> exports) {
return node.getPermittedSubclasses().stream()
.filter(DeclaredType.class::isInstance)
.map(DeclaredType.class::cast)
.map(DeclaredType::asElement)
.filter(TypeElement.class::isInstance)
.map(TypeElement.class::cast)
.filter(te -> isInPublicApi(te, exports))
.toList();
List<TypeElement> dfsStack = new ArrayList<TypeElement>().reversed(); // Faster operations to head
SequencedCollection<TypeElement> result = new LinkedHashSet<>(); // Deduplicate diamond interface inheritance
// The starting node may be in the public API - still expand it
prependSubclasses(node, dfsStack);
while (!dfsStack.isEmpty()) {
TypeElement now = dfsStack.removeFirst();
if (isInPublicApi(now, exports)) {
result.addLast(now);
} else {
// Skip the non-exported classes in the hierarchy
prependSubclasses(now, dfsStack);
}
}
return List.copyOf(result);
}
private static void prependSubclasses(TypeElement node, List<TypeElement> dfs) {
for (var e : node.getPermittedSubclasses().reversed()) {
if (e instanceof DeclaredType dt && dt.asElement() instanceof TypeElement te) {
dfs.addFirst(te);
}
}
}
private static boolean isInPublicApi(TypeElement typeElement, Set<String> exports) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,13 +25,13 @@
package build.tools.taglet;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.lang.reflect.Field;
import javax.lang.model.element.Element;
@ -91,6 +91,11 @@ public class ToolGuide implements Taglet {
@Override
public String toString(List<? extends DocTree> tags, Element elem) {
throw new UnsupportedOperationException();
}
// @Override - requires JDK-8373922 in build JDK
public String toString(List<? extends DocTree> tags, Element elem, URI docRoot) {
if (tags.isEmpty())
return "";
@ -118,7 +123,7 @@ public class ToolGuide implements Taglet {
if (label.isEmpty()) {
label = name;
}
String rootParent = currentPath().replaceAll("[^/]+", "..");
String rootParent = docRoot.resolve("..").toString();
String url = String.format("%s/%s/%s.html",
rootParent, BASE_URL, name);
@ -141,22 +146,4 @@ public class ToolGuide implements Taglet {
return sb.toString();
}
private static ThreadLocal<String> CURRENT_PATH = null;
private String currentPath() {
if (CURRENT_PATH == null) {
try {
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
.getField("CURRENT_PATH");
@SuppressWarnings("unchecked")
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
CURRENT_PATH = tl;
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Cannot determine current path", e);
}
}
return CURRENT_PATH.get();
}
}
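Both JSpec and ToolGuide now compute rootParent from the docRoot URI supplied by the doclet (the JDK-8373922 taglet API) instead of reflecting on HtmlDocletWriter.CURRENT_PATH. A small sketch of the resolve step, assuming docRoot arrives as a relative URI with a trailing slash such as "../../../"; that exact form is an assumption here, not something the patch states:

import java.net.URI;

class DocRootResolveDemo {
    public static void main(String[] args) {
        // Hypothetical docRoot for a page three directories below the API root.
        URI docRoot = URI.create("../../../");
        // java.net.URI keeps leading ".." segments when normalizing, so this
        // appends one more level, pointing just above the API root.
        String rootParent = docRoot.resolve("..").toString();
        System.out.println(rootParent); // expected: ../../../..
    }
}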

View File

@ -30,6 +30,7 @@ import java.io.StringWriter;
import java.lang.reflect.Field;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
@ -76,7 +77,7 @@ public class SetupPreviewFeature {
var target = Path.of(args[1]);
Files.createDirectories(target.getParent());
if (constantsToAdd.isEmpty()) {
Files.copy(source, target);
Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING);
} else {
String sourceCode = Files.readString(source);
try (var out = Files.newBufferedWriter(target)) {

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -257,6 +257,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
DISABLED_WARNINGS_microsoft_dgif_lib.c := 4018 4267, \
DISABLED_WARNINGS_microsoft_splashscreen_impl.c := 4018 4267 4244, \
DISABLED_WARNINGS_microsoft_splashscreen_png.c := 4267, \
DISABLED_WARNINGS_microsoft_pngread.c := 4146, \
DISABLED_WARNINGS_microsoft_splashscreen_sys.c := 4267 4244, \
LDFLAGS := $(ICONV_LDFLAGS), \
LDFLAGS_windows := -delayload:user32.dll, \
@ -338,11 +339,8 @@ else
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.
# expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
# maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
# calloc-transposed-args required for GCC 14 builds. (fixed upstream in
# Harfbuzz 032c931e1c0cfb20f18e5acb8ba005775242bd92)
HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
expansion-to-defined dangling-reference maybe-uninitialized \
calloc-transposed-args
expansion-to-defined dangling-reference maybe-uninitialized
HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers \
range-loop-analysis unused-variable
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244
@ -397,6 +395,8 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
AccelGlyphCache.c, \
CFLAGS := $(LIBFONTMANAGER_CFLAGS), \
CXXFLAGS := $(LIBFONTMANAGER_CFLAGS), \
CXXFLAGS_gcc := -fno-rtti -fno-exceptions, \
CXXFLAGS_clang := -fno-rtti -fno-exceptions, \
OPTIMIZATION := HIGHEST, \
CFLAGS_windows = -DCC_NOEX, \
EXTRA_HEADER_DIRS := $(LIBFONTMANAGER_EXTRA_HEADER_DIRS), \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -55,6 +55,12 @@ else
LIBSAPROC_LINK_TYPE := C
endif
# DWARF related sources would be included on supported platforms only.
LIBSAPROC_EXCLUDE_FILES :=
ifneq ($(call And, $(call isTargetOs, linux) $(call isTargetCpu, x86_64 aarch64)), true)
LIBSAPROC_EXCLUDE_FILES := DwarfParser.cpp dwarf.cpp
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBSAPROC, \
NAME := saproc, \
LINK_TYPE := $(LIBSAPROC_LINK_TYPE), \
@ -70,6 +76,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBSAPROC, \
CFLAGS := $(LIBSAPROC_CFLAGS), \
CXXFLAGS := $(LIBSAPROC_CFLAGS) $(LIBSAPROC_CXXFLAGS), \
EXTRA_SRC := $(LIBSAPROC_EXTRA_SRC), \
EXCLUDE_FILES := $(LIBSAPROC_EXCLUDE_FILES), \
JDK_LIBS := java.base:libjava, \
LIBS_linux := $(LIBDL), \
LIBS_macosx := \

View File

@ -88,7 +88,10 @@ function setup() {
fi
if [[ -z ${CMD+x} ]]; then
CMD="$DRIVEPREFIX/c/windows/system32/cmd.exe"
CMD="$(type -p cmd.exe 2>/dev/null)"
if [[ -z "$CMD" ]]; then
CMD="$DRIVEPREFIX/c/windows/system32/cmd.exe"
fi
fi
if [[ -z ${WINTEMP+x} ]]; then

View File

@ -1182,12 +1182,12 @@ class CallStubImpl {
public:
// Size of call trampoline stub.
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
return MacroAssembler::max_trampoline_stub_size();
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
return 5; // metadata; call dest; trampoline address; trampoline destination; trampoline_owner_metadata
}
};
@ -2233,15 +2233,9 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
} else {
st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmp rscratch1, r10");
}
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

View File

@ -1,6 +1,6 @@
//
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// Copyright (c) 2020, 2026, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -247,10 +247,39 @@ source %{
case Op_MinVHF:
case Op_MaxVHF:
case Op_SqrtVHF:
if (UseSVE == 0 && !is_feat_fp16_supported()) {
return false;
}
break;
// At the time of writing this, the Vector API has no half-float (FP16) species.
// Consequently, AddReductionVHF and MulReductionVHF are only produced by the
// auto-vectorizer, which requires strictly ordered semantics for FP reductions.
//
// There is no direct Neon instruction that performs strictly ordered floating
// point add reduction. Hence, on Neon only machines, the add reduction operation
// is implemented as a scalarized sequence using half-precision scalar instruction
// FADD which requires FEAT_FP16 and ASIMDHP to be available on the target.
// On SVE machines (UseSVE > 0) however, there is a direct instruction (FADDA) which
// implements strictly ordered floating point add reduction which does not require
// the FEAT_FP16 and ASIMDHP checks as SVE supports half-precision floats by default.
case Op_AddReductionVHF:
// FEAT_FP16 is enabled if both "fphp" and "asimdhp" features are supported.
// Only the Neon instructions need this check. SVE supports half-precision floats
// by default.
if (UseSVE == 0 && !is_feat_fp16_supported()) {
if (length_in_bytes < 8 || (UseSVE == 0 && !is_feat_fp16_supported())) {
return false;
}
break;
case Op_MulReductionVHF:
// There are no direct Neon/SVE instructions that perform strictly ordered
// floating point multiply reduction.
// For vector length ≤ 16 bytes, the reduction is implemented as a scalarized
// sequence using half-precision scalar instruction FMUL. This path requires
// FEAT_FP16 and ASIMDHP to be available on the target.
// For vector length > 16 bytes, this operation is disabled because there is no
// direct SVE instruction that performs a strictly ordered FP16 multiply
// reduction.
if (length_in_bytes < 8 || length_in_bytes > 16 || !is_feat_fp16_supported()) {
return false;
}
break;
@ -300,6 +329,7 @@ source %{
case Op_VectorRearrange:
case Op_MulReductionVD:
case Op_MulReductionVF:
case Op_MulReductionVHF:
case Op_MulReductionVI:
case Op_MulReductionVL:
case Op_CompressBitsV:
@ -364,6 +394,7 @@ source %{
case Op_VectorMaskCmp:
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_AddReductionVHF:
case Op_AddReductionVF:
case Op_AddReductionVD:
case Op_AndReductionV:
@ -597,13 +628,9 @@ instruct vloadcon(vReg dst, immI0 src) %{
BasicType bt = Matcher::vector_element_basic_type(this);
if (UseSVE == 0) {
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
int entry_idx = __ vector_iota_entry_index(bt);
assert(length_in_bytes <= 16, "must be");
// The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 16.
int offset = exact_log2(type2aelembytes(bt)) << 4;
if (is_floating_point_type(bt)) {
offset += 32;
}
__ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices() + offset));
__ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices(entry_idx)));
if (length_in_bytes == 16) {
__ ldrq($dst$$FloatRegister, rscratch1);
} else {
@ -3406,6 +3433,44 @@ instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vR
ins_pipe(pipe_slow);
%}
// Add Reduction for Half floats (FP16).
// Neon does not provide direct instructions for strictly ordered floating-point add reductions.
// On Neon-only targets (UseSVE = 0), this operation is implemented as a sequence of scalar additions:
// values equal to the vector width are loaded into a vector register, each lane is extracted,
// and its value is accumulated into the running sum, producing a final scalar result.
instruct reduce_addHF_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(UseSVE == 0);
match(Set dst (AddReductionVHF fsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_addHF $dst, $fsrc, $vsrc\t# 4HF/8HF. KILL $tmp" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_add_fp16($dst$$FloatRegister, $fsrc$$FloatRegister,
$vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// This rule calculates the reduction result in strict order. Two cases will
// reach here:
// 1. Non strictly-ordered AddReductionVHF when vector size > 128-bits. For example -
// AddReductionVHF generated by Vector API. For vector size > 128-bits, it is more
// beneficial performance-wise to generate direct SVE instruction even if it is
// strictly ordered.
// 2. Strictly-ordered AddReductionVHF. For example - AddReductionVHF generated by
// auto-vectorization on SVE machine.
instruct reduce_addHF_sve(vRegF dst_src1, vReg src2) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddReductionVHF dst_src1 src2));
format %{ "reduce_addHF_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_fadda($dst_src1$$FloatRegister, __ H, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// This rule calculates the reduction result in strict order. Two cases will
// reach here:
// 1. Non strictly-ordered AddReductionVF when vector size > 128-bits. For example -
@ -3496,12 +3561,14 @@ instruct reduce_addL_masked(iRegLNoSp dst, iRegL isrc, vReg vsrc, pRegGov pg, vR
ins_pipe(pipe_slow);
%}
instruct reduce_addF_masked(vRegF dst_src1, vReg src2, pRegGov pg) %{
instruct reduce_addFHF_masked(vRegF dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddReductionVHF (Binary dst_src1 src2) pg));
match(Set dst_src1 (AddReductionVF (Binary dst_src1 src2) pg));
format %{ "reduce_addF_masked $dst_src1, $pg, $dst_src1, $src2" %}
format %{ "reduce_addFHF_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fadda($dst_src1$$FloatRegister, __ S,
BasicType bt = Matcher::vector_element_basic_type(this, $src2);
__ sve_fadda($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
@ -3549,14 +3616,17 @@ instruct reduce_mulL(iRegLNoSp dst, iRegL isrc, vReg vsrc) %{
ins_pipe(pipe_slow);
%}
instruct reduce_mulF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
instruct reduce_mulFHF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(Matcher::vector_length_in_bytes(n->in(2)) <= 16);
match(Set dst (MulReductionVHF fsrc vsrc));
match(Set dst (MulReductionVF fsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_mulF $dst, $fsrc, $vsrc\t# 2F/4F. KILL $tmp" %}
format %{ "reduce_mulFHF $dst, $fsrc, $vsrc\t# 2F/4F/4HF/8HF. KILL $tmp" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_mul_fp($dst$$FloatRegister, T_FLOAT, $fsrc$$FloatRegister,
BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
__ neon_reduce_mul_fp($dst$$FloatRegister, bt, $fsrc$$FloatRegister,
$vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
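The comments above about strictly ordered reductions come down to floating-point addition not being associative, which is why the auto-vectorizer only accepts reductions that preserve source order (FADDA on SVE, a scalarized FADD sequence on Neon). A short Java illustration with values chosen purely for demonstration:

class FpReductionOrderDemo {
    public static void main(String[] args) {
        float a = 1e8f, b = -1e8f, c = 0.001f;
        // Left-to-right (strictly ordered) evaluation keeps the small term.
        System.out.println((a + b) + c); // 0.001
        // Reassociating first absorbs c into -1e8f, then cancels it away.
        System.out.println(a + (b + c)); // 0.0
    }
}

The FP16 rules above exist to preserve exactly this ordering when a loop is auto-vectorized, with even less headroom in half precision.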

View File

@ -1,6 +1,6 @@
//
// Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2025, Arm Limited. All rights reserved.
// Copyright (c) 2020, 2026, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -237,10 +237,39 @@ source %{
case Op_MinVHF:
case Op_MaxVHF:
case Op_SqrtVHF:
if (UseSVE == 0 && !is_feat_fp16_supported()) {
return false;
}
break;
// At the time of writing this, the Vector API has no half-float (FP16) species.
// Consequently, AddReductionVHF and MulReductionVHF are only produced by the
// auto-vectorizer, which requires strictly ordered semantics for FP reductions.
//
// There is no direct Neon instruction that performs strictly ordered floating
// point add reduction. Hence, on Neon only machines, the add reduction operation
// is implemented as a scalarized sequence using half-precision scalar instruction
// FADD which requires FEAT_FP16 and ASIMDHP to be available on the target.
// On SVE machines (UseSVE > 0) however, there is a direct instruction (FADDA) which
// implements strictly ordered floating point add reduction which does not require
// the FEAT_FP16 and ASIMDHP checks as SVE supports half-precision floats by default.
case Op_AddReductionVHF:
// FEAT_FP16 is enabled if both "fphp" and "asimdhp" features are supported.
// Only the Neon instructions need this check. SVE supports half-precision floats
// by default.
if (UseSVE == 0 && !is_feat_fp16_supported()) {
if (length_in_bytes < 8 || (UseSVE == 0 && !is_feat_fp16_supported())) {
return false;
}
break;
case Op_MulReductionVHF:
// There are no direct Neon/SVE instructions that perform strictly ordered
// floating point multiply reduction.
// For vector length ≤ 16 bytes, the reduction is implemented as a scalarized
// sequence using half-precision scalar instruction FMUL. This path requires
// FEAT_FP16 and ASIMDHP to be available on the target.
// For vector length > 16 bytes, this operation is disabled because there is no
// direct SVE instruction that performs a strictly ordered FP16 multiply
// reduction.
if (length_in_bytes < 8 || length_in_bytes > 16 || !is_feat_fp16_supported()) {
return false;
}
break;
@ -290,6 +319,7 @@ source %{
case Op_VectorRearrange:
case Op_MulReductionVD:
case Op_MulReductionVF:
case Op_MulReductionVHF:
case Op_MulReductionVI:
case Op_MulReductionVL:
case Op_CompressBitsV:
@ -354,6 +384,7 @@ source %{
case Op_VectorMaskCmp:
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_AddReductionVHF:
case Op_AddReductionVF:
case Op_AddReductionVD:
case Op_AndReductionV:
@ -2063,6 +2094,25 @@ instruct reduce_non_strict_order_add4F_neon(vRegF dst, vRegF fsrc, vReg vsrc, vR
ins_pipe(pipe_slow);
%}
dnl
// Add Reduction for Half floats (FP16).
// Neon does not provide direct instructions for strictly ordered floating-point add reductions.
// On Neon-only targets (UseSVE = 0), this operation is implemented as a sequence of scalar additions:
// values equal to the vector width are loaded into a vector register, each lane is extracted,
// and its value is accumulated into the running sum, producing a final scalar result.
instruct reduce_addHF_neon(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(UseSVE == 0);
match(Set dst (AddReductionVHF fsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_addHF $dst, $fsrc, $vsrc\t# 4HF/8HF. KILL $tmp" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_add_fp16($dst$$FloatRegister, $fsrc$$FloatRegister,
$vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
dnl
dnl REDUCE_ADD_FP_SVE($1, $2 )
dnl REDUCE_ADD_FP_SVE(type, size)
define(`REDUCE_ADD_FP_SVE', `
@ -2074,21 +2124,26 @@ define(`REDUCE_ADD_FP_SVE', `
// strictly ordered.
// 2. Strictly-ordered AddReductionV$1. For example - AddReductionV$1 generated by
// auto-vectorization on SVE machine.
instruct reduce_add$1_sve(vReg$1 dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) ||
n->as_Reduction()->requires_strict_order());
instruct reduce_add$1_sve(vReg`'ifelse($1, HF, F, $1) dst_src1, vReg src2) %{
ifelse($1, HF,
`predicate(UseSVE > 0);',
`predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n->in(2))) ||
n->as_Reduction()->requires_strict_order());')
match(Set dst_src1 (AddReductionV$1 dst_src1 src2));
format %{ "reduce_add$1_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
ifelse($1, HF, `',
`assert(UseSVE > 0, "must be sve");
')dnl
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src2);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
__ sve_fadda($dst_src1$$FloatRegister, __ $2, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
REDUCE_ADD_FP_SVE(F, S)
REDUCE_ADD_FP_SVE(HF, H)
REDUCE_ADD_FP_SVE(F, S)
// reduction addD
@ -2129,21 +2184,30 @@ dnl
dnl REDUCE_ADD_FP_PREDICATE($1, $2 )
dnl REDUCE_ADD_FP_PREDICATE(insn_name, op_name)
define(`REDUCE_ADD_FP_PREDICATE', `
instruct reduce_add$1_masked(vReg$1 dst_src1, vReg src2, pRegGov pg) %{
instruct reduce_add$1_masked(vReg$2 dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddReductionV$1 (Binary dst_src1 src2) pg));
ifelse($2, F,
`match(Set dst_src1 (AddReductionVHF (Binary dst_src1 src2) pg));
match(Set dst_src1 (AddReductionV$2 (Binary dst_src1 src2) pg));',
`match(Set dst_src1 (AddReductionV$2 (Binary dst_src1 src2) pg));')
format %{ "reduce_add$1_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fadda($dst_src1$$FloatRegister, __ $2,
$pg$$PRegister, $src2$$FloatRegister);
ifelse($2, F,
`BasicType bt = Matcher::vector_element_basic_type(this, $src2);
',)dnl
ifelse($2, F,
`__ sve_fadda($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);',
`__ sve_fadda($dst_src1$$FloatRegister, __ $2,
$pg$$PRegister, $src2$$FloatRegister);')
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
REDUCE_ADD_INT_PREDICATE(I, iRegIorL2I)
REDUCE_ADD_INT_PREDICATE(L, iRegL)
REDUCE_ADD_FP_PREDICATE(F, S)
REDUCE_ADD_FP_PREDICATE(D, D)
REDUCE_ADD_FP_PREDICATE(FHF, F)
REDUCE_ADD_FP_PREDICATE(D, D)
// ------------------------------ Vector reduction mul -------------------------
@ -2176,30 +2240,37 @@ instruct reduce_mulL(iRegLNoSp dst, iRegL isrc, vReg vsrc) %{
ins_pipe(pipe_slow);
%}
instruct reduce_mulF(vRegF dst, vRegF fsrc, vReg vsrc, vReg tmp) %{
predicate(Matcher::vector_length_in_bytes(n->in(2)) <= 16);
match(Set dst (MulReductionVF fsrc vsrc));
dnl REDUCE_MUL_FP($1, $2 )
dnl REDUCE_MUL_FP(insn_name, op_name)
define(`REDUCE_MUL_FP', `
instruct reduce_mul$1(vReg$2 dst, vReg$2 ifelse($2, F, fsrc, dsrc), vReg vsrc, vReg tmp) %{
predicate(Matcher::vector_length_in_bytes(n->in(2)) ifelse($2, F, <=, ==) 16);
ifelse($2, F,
`match(Set dst (MulReductionVHF fsrc vsrc));
match(Set dst (MulReductionV$2 fsrc vsrc));',
`match(Set dst (MulReductionV$2 dsrc vsrc));')
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_mulF $dst, $fsrc, $vsrc\t# 2F/4F. KILL $tmp" %}
ifelse($2, F,
`format %{ "reduce_mul$1 $dst, $fsrc, $vsrc\t# 2F/4F/4HF/8HF. KILL $tmp" %}',
`format %{ "reduce_mul$1 $dst, $dsrc, $vsrc\t# 2D. KILL $tmp" %}')
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
__ neon_reduce_mul_fp($dst$$FloatRegister, T_FLOAT, $fsrc$$FloatRegister,
$vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);
ifelse($2, F,
`uint length_in_bytes = Matcher::vector_length_in_bytes(this, $vsrc);
',)dnl
ifelse($2, F,
`BasicType bt = Matcher::vector_element_basic_type(this, $vsrc);
',)dnl
ifelse($2, F,
`__ neon_reduce_mul_fp($dst$$FloatRegister, bt, $fsrc$$FloatRegister,
$vsrc$$FloatRegister, length_in_bytes, $tmp$$FloatRegister);',
`__ neon_reduce_mul_fp($dst$$FloatRegister, T_DOUBLE, $dsrc$$FloatRegister,
$vsrc$$FloatRegister, 16, $tmp$$FloatRegister);')
%}
ins_pipe(pipe_slow);
%}
instruct reduce_mulD(vRegD dst, vRegD dsrc, vReg vsrc, vReg tmp) %{
predicate(Matcher::vector_length_in_bytes(n->in(2)) == 16);
match(Set dst (MulReductionVD dsrc vsrc));
effect(TEMP_DEF dst, TEMP tmp);
format %{ "reduce_mulD $dst, $dsrc, $vsrc\t# 2D. KILL $tmp" %}
ins_encode %{
__ neon_reduce_mul_fp($dst$$FloatRegister, T_DOUBLE, $dsrc$$FloatRegister,
$vsrc$$FloatRegister, 16, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
%}')dnl
dnl
REDUCE_MUL_FP(FHF, F)
REDUCE_MUL_FP(D, D)
dnl
dnl REDUCE_BITWISE_OP_NEON($1, $2 $3 $4 )

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
* Copyright 2026 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1000,30 +1001,6 @@ public:
f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
}
#define INSN(NAME, cond) \
void NAME(address dest) { \
br(cond, dest); \
}
INSN(beq, EQ);
INSN(bne, NE);
INSN(bhs, HS);
INSN(bcs, CS);
INSN(blo, LO);
INSN(bcc, CC);
INSN(bmi, MI);
INSN(bpl, PL);
INSN(bvs, VS);
INSN(bvc, VC);
INSN(bhi, HI);
INSN(bls, LS);
INSN(bge, GE);
INSN(blt, LT);
INSN(bgt, GT);
INSN(ble, LE);
INSN(bal, AL);
INSN(bnv, NV);
void br(Condition cc, Label &L);
#undef INSN
@ -1095,6 +1072,10 @@ public:
#undef INSN
void wfet(Register rt) {
system(0b00, 0b011, 0b0001, 0b0000, 0b000, rt);
}
// we only provide mrs and msr for the special purpose system
// registers where op1 (instr[20:19]) == 11
// n.b msr has L (instr[21]) == 0 mrs has L == 1
@ -1274,6 +1255,13 @@ public:
sz, 0b000, ordered);
}
void load_store_volatile(Register data, BasicType type, Register addr,
bool is_load) {
load_store_exclusive(dummy_reg, data, dummy_reg, addr,
(Assembler::operand_size)exact_log2(type2aelembytes(type)),
is_load ? 0b110 : 0b100, /* ordered = */ true);
}
#define INSN4(NAME, sz, op, o0) /* Four registers */ \
void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) { \
guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright 2026 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,6 +43,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadIdentifier.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
@ -59,22 +61,6 @@ const Register SHIFT_count = r0; // where count for shift operations must be
#define __ _masm->
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
Register &tmp2) {
if (tmp1 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp1 = extra;
} else if (tmp2 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp2 = extra;
}
assert_different_registers(preserve, tmp1, tmp2);
}
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
@ -536,6 +522,10 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
address b = c->as_pointer();
if (b == (address)ThreadIdentifier::unsafe_offset()) {
__ lea(dest->as_register_lo(), ExternalAddress(b));
break;
}
if (AOTRuntimeConstants::contains(b)) {
__ load_aotrc_address(dest->as_register_lo(), b);
break;
@ -922,8 +912,15 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
reg2stack(temp, dest, dest->type());
}
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code, CodeEmitInfo* info,
bool wide) {
mem2reg(src, dest, type, patch_code, info, wide, false);
}
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code, CodeEmitInfo* info,
bool wide, bool is_volatile) {
LIR_Address* addr = src->as_address_ptr();
LIR_Address* from_addr = src->as_address_ptr();
@ -936,10 +933,27 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
return;
}
if (is_volatile) {
load_volatile(from_addr, dest, type, info);
} else {
load_unordered(from_addr, dest, type, wide, info);
}
if (is_reference_type(type)) {
if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
__ verify_oop(dest->as_register());
}
}
void LIR_Assembler::load_unordered(LIR_Address *from_addr, LIR_Opr dest,
BasicType type, bool wide, CodeEmitInfo* info) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
int null_check_here = code_offset();
switch (type) {
case T_FLOAT: {
__ ldrs(dest->as_float_reg(), as_Address(from_addr));
@ -997,16 +1011,44 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
default:
ShouldNotReachHere();
}
if (is_reference_type(type)) {
if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
__ verify_oop(dest->as_register());
}
}
void LIR_Assembler::load_volatile(LIR_Address *from_addr, LIR_Opr dest,
BasicType type, CodeEmitInfo* info) {
__ lea(rscratch1, as_Address(from_addr));
Register dest_reg = rscratch2;
if (!is_floating_point_type(type)) {
dest_reg = (dest->is_single_cpu()
? dest->as_register() : dest->as_register_lo());
}
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
// Uses LDAR to ensure memory ordering.
__ load_store_volatile(dest_reg, type, rscratch1, /*is_load*/true);
switch (type) {
// LDAR is unsigned so need to sign-extend for byte and short
case T_BYTE:
__ sxtb(dest_reg, dest_reg);
break;
case T_SHORT:
__ sxth(dest_reg, dest_reg);
break;
// need to move from GPR to FPR after LDAR with FMOV for floating types
case T_FLOAT:
__ fmovs(dest->as_float_reg(), dest_reg);
break;
case T_DOUBLE:
__ fmovd(dest->as_double_reg(), dest_reg);
break;
default:
break;
}
}
int LIR_Assembler::array_element_size(BasicType type) const {
int elem_size = type2aelembytes(type);
@ -1269,12 +1311,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
assert_different_registers(obj, k_RInfo, klass_RInfo);
@ -2778,7 +2817,9 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
}
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
if (dest->is_address() || src->is_address()) {
if (src->is_address()) {
mem2reg(src, dest, type, lir_patch_none, info, /*wide*/false, /*is_volatile*/true);
} else if (dest->is_address()) {
move_op(src, dest, type, lir_patch_none, info, /*wide*/false);
} else {
ShouldNotReachHere();

View File

@ -57,6 +57,12 @@ friend class ArrayCopyStub;
void casw(Register addr, Register newval, Register cmpval);
void casl(Register addr, Register newval, Register cmpval);
void mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code,
CodeEmitInfo* info, bool wide, bool is_volatile);
void load_unordered(LIR_Address *from_addr, LIR_Opr dest, BasicType type, bool wide, CodeEmitInfo* info);
void load_volatile(LIR_Address *from_addr, LIR_Opr dest, BasicType type, CodeEmitInfo* info);
static const int max_tableswitches = 20;
struct tableswitch switches[max_tableswitches];
int tableswitch_count;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1287,9 +1287,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
@ -1308,9 +1306,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
@ -1402,14 +1398,5 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
// 8179954: We need to make sure that the code generated for
// volatile accesses forms a sequentially-consistent set of
// operations when combined with STLR and LDAR. Without a leading
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
if (!CompilerConfig::is_c1_only_no_jvmci()) {
__ membar();
}
__ volatile_load_mem_reg(address, result, info);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -105,12 +105,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
} else {
mov(t1, checked_cast<int32_t>(markWord::prototype().value()));
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
if (UseCompressedClassPointers) { // Take care not to kill klass
encode_klass_not_null(t1, klass);
strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(t1, klass); // Take care not to kill klass
strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (len->is_valid()) {
@ -121,7 +117,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// Clear gap/first 4 bytes following the length field.
strw(zr, Address(obj, base_offset));
}
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
store_klass_gap(obj, zr);
}
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2026 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1883,6 +1884,27 @@ void C2_MacroAssembler::neon_reduce_mul_fp(FloatRegister dst, BasicType bt,
BLOCK_COMMENT("neon_reduce_mul_fp {");
switch(bt) {
// The T_SHORT type below is for Float16 type which also uses floating-point
// instructions.
case T_SHORT:
fmulh(dst, fsrc, vsrc);
ext(vtmp, T8B, vsrc, vsrc, 2);
fmulh(dst, dst, vtmp);
ext(vtmp, T8B, vsrc, vsrc, 4);
fmulh(dst, dst, vtmp);
ext(vtmp, T8B, vsrc, vsrc, 6);
fmulh(dst, dst, vtmp);
if (isQ) {
ext(vtmp, T16B, vsrc, vsrc, 8);
fmulh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 10);
fmulh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 12);
fmulh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 14);
fmulh(dst, dst, vtmp);
}
break;
case T_FLOAT:
fmuls(dst, fsrc, vsrc);
ins(vtmp, S, vsrc, 0, 1);
@ -1907,6 +1929,33 @@ void C2_MacroAssembler::neon_reduce_mul_fp(FloatRegister dst, BasicType bt,
BLOCK_COMMENT("} neon_reduce_mul_fp");
}
// Vector reduction add for half float type with ASIMD instructions.
void C2_MacroAssembler::neon_reduce_add_fp16(FloatRegister dst, FloatRegister fsrc, FloatRegister vsrc,
unsigned vector_length_in_bytes, FloatRegister vtmp) {
assert(vector_length_in_bytes == 8 || vector_length_in_bytes == 16, "unsupported");
bool isQ = vector_length_in_bytes == 16;
BLOCK_COMMENT("neon_reduce_add_fp16 {");
faddh(dst, fsrc, vsrc);
ext(vtmp, T8B, vsrc, vsrc, 2);
faddh(dst, dst, vtmp);
ext(vtmp, T8B, vsrc, vsrc, 4);
faddh(dst, dst, vtmp);
ext(vtmp, T8B, vsrc, vsrc, 6);
faddh(dst, dst, vtmp);
if (isQ) {
ext(vtmp, T16B, vsrc, vsrc, 8);
faddh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 10);
faddh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 12);
faddh(dst, dst, vtmp);
ext(vtmp, T16B, vsrc, vsrc, 14);
faddh(dst, dst, vtmp);
}
BLOCK_COMMENT("} neon_reduce_add_fp16");
}
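For reference, a scalar sketch of what the sequence above computes; this is a minimal model assuming faddh and the ext rotations walk the lanes in index order, so the sum is strictly ordered (not reassociated). The name below is made up for the sketch, and float stands in for the half-precision lanes.
// Scalar model of the strictly-ordered half-float add reduction above.
// 'lanes' is 4 for a 64-bit vector and 8 for a 128-bit (isQ) vector.
static float reduce_add_fp16_model(float fsrc, const float* lane, int lanes) {
  float acc = fsrc + lane[0];       // faddh(dst, fsrc, vsrc)
  for (int i = 1; i < lanes; i++) {
    acc += lane[i];                 // ext(vtmp, ..., 2*i); faddh(dst, dst, vtmp)
  }
  return acc;
}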
// Helper to select logical instruction
void C2_MacroAssembler::neon_reduce_logical_helper(int opc, bool is64, Register Rd,
Register Rn, Register Rm,
@ -2414,17 +2463,17 @@ void C2_MacroAssembler::neon_rearrange_hsd(FloatRegister dst, FloatRegister src,
break;
case T_LONG:
case T_DOUBLE:
// Load the iota indices for Long type. The indices are ordered by
// type B/S/I/L/F/D, and the offset between two types is 16; Hence
// the offset for L is 48.
lea(rscratch1,
ExternalAddress(StubRoutines::aarch64::vector_iota_indices() + 48));
ldrq(tmp, rscratch1);
// Check whether the input "shuffle" is the same with iota indices.
// Return "src" if true, otherwise swap the two elements of "src".
cm(EQ, dst, size2, shuffle, tmp);
ext(tmp, size1, src, src, 8);
bsl(dst, size1, src, tmp);
{
int idx = vector_iota_entry_index(T_LONG);
lea(rscratch1,
ExternalAddress(StubRoutines::aarch64::vector_iota_indices(idx)));
ldrq(tmp, rscratch1);
// Check whether the input "shuffle" is the same with iota indices.
// Return "src" if true, otherwise swap the two elements of "src".
cm(EQ, dst, size2, shuffle, tmp);
ext(tmp, size1, src, src, 8);
bsl(dst, size1, src, tmp);
}
break;
default:
assert(false, "unsupported element type");
@ -2896,3 +2945,24 @@ void C2_MacroAssembler::sve_cpy(FloatRegister dst, SIMD_RegVariant T,
}
Assembler::sve_cpy(dst, T, pg, imm8, isMerge);
}
int C2_MacroAssembler::vector_iota_entry_index(BasicType bt) {
// The vector iota entries array is ordered by type B/S/I/L/F/D, and
// the offset between two types is 16.
switch(bt) {
case T_BYTE:
return 0;
case T_SHORT:
return 1;
case T_INT:
return 2;
case T_LONG:
return 3;
case T_FLOAT:
return 4;
case T_DOUBLE:
return 5;
default:
ShouldNotReachHere();
}
}
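A hedged note on how this relates to the old hard-coded offset in neon_rearrange_hsd above: the per-type iota tables are 16 bytes apart, so the entry index recovers the former byte offsets (B=0, S=16, I=32, L=48, F=64, D=80). The line below is illustration only.
int old_byte_offset = vector_iota_entry_index(T_LONG) * 16;  // == 48, the old "+ 48"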

View File

@ -177,6 +177,9 @@
FloatRegister fsrc, FloatRegister vsrc,
unsigned vector_length_in_bytes, FloatRegister vtmp);
void neon_reduce_add_fp16(FloatRegister dst, FloatRegister fsrc, FloatRegister vsrc,
unsigned vector_length_in_bytes, FloatRegister vtmp);
void neon_reduce_logical(int opc, Register dst, BasicType bt, Register isrc,
FloatRegister vsrc, unsigned vector_length_in_bytes);
@ -249,4 +252,5 @@
void sve_cpy(FloatRegister dst, SIMD_RegVariant T, PRegister pg, int imm8,
bool isMerge);
int vector_iota_entry_index(BasicType bt);
#endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP

View File

@ -89,16 +89,21 @@ void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
// In an AOT "production" run we have a mixture of AOTed and normal JITed code.
// A static call stub in an AOTed nmethod always has a far jump.
// A normal JITed nmethod may have a short or a far jump depending on distance.
// Determine the actual jump instruction present in the code.
address next_instr = method_holder->next_instruction_address();
bool is_general_jump = nativeInstruction_at(next_instr)->is_general_jump();
#ifdef ASSERT
NativeJump* jump = MacroAssembler::codestub_branch_needs_far_jump()
? nativeGeneralJump_at(method_holder->next_instruction_address())
: nativeJump_at(method_holder->next_instruction_address());
NativeJump* jump = is_general_jump ? nativeGeneralJump_at(next_instr) : nativeJump_at(next_instr);
verify_mt_safe(callee, entry, method_holder, jump);
#endif
// Update stub.
method_holder->set_data((intptr_t)callee());
MacroAssembler::pd_patch_instruction(method_holder->next_instruction_address(), entry);
MacroAssembler::pd_patch_instruction(next_instr, entry);
ICache::invalidate_range(stub, to_interp_stub_size());
// Update jump to call.
set_destination_mt_safe(stub);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,8 +56,10 @@ void CardTableBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet d
}
}
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2) {
precond(tmp1 != noreg);
precond(tmp2 != noreg);
assert_different_registers(obj, tmp1, tmp2);
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
@ -65,16 +67,16 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
assert(CardTable::dirty_card_val() == 0, "must be");
__ load_byte_map_base(rscratch1);
__ load_byte_map_base(tmp1);
if (UseCondCardMark) {
Label L_already_dirty;
__ ldrb(rscratch2, Address(obj, rscratch1));
__ cbz(rscratch2, L_already_dirty);
__ strb(zr, Address(obj, rscratch1));
__ ldrb(tmp2, Address(obj, tmp1));
__ cbz(tmp2, L_already_dirty);
__ strb(zr, Address(obj, tmp1));
__ bind(L_already_dirty);
} else {
__ strb(zr, Address(obj, rscratch1));
__ strb(zr, Address(obj, tmp1));
}
}
@ -112,10 +114,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
if (needs_post_barrier) {
// flatten object address if needed
if (!precise || (dst.index() == noreg && dst.offset() == 0)) {
store_check(masm, dst.base(), dst);
store_check(masm, dst.base(), tmp1, tmp2);
} else {
__ lea(tmp3, dst);
store_check(masm, tmp3, dst);
store_check(masm, tmp3, tmp1, tmp2);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ protected:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3);
void store_check(MacroAssembler* masm, Register obj, Address dst);
void store_check(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2);
};
#endif // CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP

View File

@ -50,14 +50,10 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);
if (CompilerConfig::is_c1_only_no_jvmci()) {
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// However for tiered compilation C1 inserts a full barrier before
// volatile loads which means we don't need an additional barrier
// here (see LIRGenerator::volatile_field_load()).
__ membar(__ AnyAny);
}
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// See also: LIR_Assembler::casw, LIR_Assembler::casl.
__ membar(__ AnyAny);
}
#undef __

View File

@ -879,7 +879,9 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
ICache::invalidate_word((address)patch_addr);
if (!UseSingleICacheInvalidation) {
ICache::invalidate_word((address)patch_addr);
}
}
#ifdef COMPILER1

View File

@ -115,18 +115,26 @@ define_pd_global(intx, InlineSmallCode, 1000);
"Value -1 means off.") \
range(-1, 4096) \
product(ccstr, OnSpinWaitInst, "yield", DIAGNOSTIC, \
"The instruction to use to implement " \
"java.lang.Thread.onSpinWait()." \
"Valid values are: none, nop, isb, yield, sb.") \
"The instruction to use for java.lang.Thread.onSpinWait(). " \
"Valid values are: none, nop, isb, yield, sb, wfet.") \
constraint(OnSpinWaitInstNameConstraintFunc, AtParse) \
product(uint, OnSpinWaitInstCount, 1, DIAGNOSTIC, \
"The number of OnSpinWaitInst instructions to generate." \
"It cannot be used with OnSpinWaitInst=none.") \
"The number of OnSpinWaitInst instructions to generate. " \
"It cannot be used with OnSpinWaitInst=none. " \
"For OnSpinWaitInst=wfet it must be 1.") \
range(1, 99) \
product(uint, OnSpinWaitDelay, 40, DIAGNOSTIC, \
"The minimum delay (in nanoseconds) of the OnSpinWait loop. " \
"It can only be used with -XX:OnSpinWaitInst=wfet.") \
range(1, 1000) \
product(ccstr, UseBranchProtection, "none", \
"Branch Protection to use: none, standard, pac-ret") \
product(bool, AlwaysMergeDMB, true, DIAGNOSTIC, \
"Always merge DMB instructions in code emission") \
product(bool, NeoverseN1ICacheErratumMitigation, false, DIAGNOSTIC, \
"Enable workaround for Neoverse N1 erratum 1542419") \
product(bool, UseSingleICacheInvalidation, false, DIAGNOSTIC, \
"Defer multiple ICache invalidation to single invalidation") \
// end of ARCH_FLAGS

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -989,26 +989,15 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
bool receiver_can_be_null) {
Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
profile_receiver_type(receiver, mdp, 0);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -285,8 +285,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
bool receiver_can_be_null = false);
void profile_virtual_call(Register receiver, Register mdp);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass);

View File

@ -55,6 +55,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/integerCast.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
@ -762,7 +763,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
// if (!TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@ -952,7 +953,10 @@ void MacroAssembler::emit_static_call_stub() {
}
int MacroAssembler::static_call_stub_size() {
if (!codestub_branch_needs_far_jump()) {
// During an AOT production run, AOT- and JIT-compiled code
// are used at the same time. We need this size
// to be the same for both types of code.
if (!codestub_branch_needs_far_jump() && !AOTCodeCache::is_on_for_use()) {
// isb; movk; movz; movz; b
return 5 * NativeInstruction::instruction_size;
}
@ -1002,14 +1006,10 @@ int MacroAssembler::ic_check(int end_alignment) {
load_narrow_klass_compact(tmp1, receiver);
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmp(tmp1, tmp2);
}
Label dont;
@ -2917,7 +2917,11 @@ void MacroAssembler::increment(Address dst, int value)
// Push lots of registers in the bit set supplied. Don't push sp.
// Return the number of words pushed
int MacroAssembler::push(unsigned int bitset, Register stack) {
int MacroAssembler::push(RegSet regset, Register stack) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
int words_pushed = 0;
// Scan bitset to accumulate register pairs
@ -2947,7 +2951,11 @@ int MacroAssembler::push(unsigned int bitset, Register stack) {
return count;
}
int MacroAssembler::pop(unsigned int bitset, Register stack) {
int MacroAssembler::pop(RegSet regset, Register stack) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
int words_pushed = 0;
// Scan bitset to accumulate register pairs
@ -2979,7 +2987,11 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) {
// Push lots of registers in the bit set supplied. Don't push sp.
// Return the number of dwords pushed
int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
int MacroAssembler::push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
int words_pushed = 0;
bool use_sve = false;
int sve_vector_size_in_bytes = 0;
@ -3092,7 +3104,11 @@ int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode m
}
// Return the number of dwords popped
int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
int MacroAssembler::pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
int words_pushed = 0;
bool use_sve = false;
int sve_vector_size_in_bytes = 0;
@ -3202,7 +3218,11 @@ int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mo
}
// Return the number of dwords pushed
int MacroAssembler::push_p(unsigned int bitset, Register stack) {
int MacroAssembler::push_p(PRegSet regset, Register stack) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
bool use_sve = false;
int sve_predicate_size_in_slots = 0;
@ -3239,7 +3259,11 @@ int MacroAssembler::push_p(unsigned int bitset, Register stack) {
}
// Return the number of dwords popped
int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
int MacroAssembler::pop_p(PRegSet regset, Register stack) {
if (regset.bits() == 0) {
return 0;
}
auto bitset = integer_cast<unsigned int>(regset.bits());
bool use_sve = false;
int sve_predicate_size_in_slots = 0;
@ -3278,7 +3302,6 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
assert (Universe::heap() != nullptr, "java heap should be initialized");
if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
// rheapbase is allocated as general register
@ -3456,7 +3479,7 @@ void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement
void MacroAssembler::reinit_heapbase()
{
if (UseCompressedOops) {
if (Universe::is_fully_initialized()) {
if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_dump()) {
mov(rheapbase, CompressedOops::base());
} else {
lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
@ -5067,13 +5090,10 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
void MacroAssembler::load_klass(Register dst, Register src) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
decode_klass_not_null(dst);
} else if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else {
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
decode_klass_not_null(dst);
}
void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
@ -5125,25 +5145,22 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R
void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
assert_different_registers(obj, klass, tmp);
if (UseCompressedClassPointers) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (CompressedKlassPointers::base() == nullptr) {
cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
// Only the bottom 32 bits matter
cmpw(klass, tmp);
return;
}
decode_klass_not_null(tmp);
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (CompressedKlassPointers::base() == nullptr) {
cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
return;
} else if (!AOTCodeCache::is_on_for_dump() &&
((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
// Only the bottom 32 bits matter
cmpw(klass, tmp);
return;
}
decode_klass_not_null(tmp);
cmp(klass, tmp);
}
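A sketch of the equivalence the two fast paths above rely on, assuming the usual narrow-klass encoding narrow = (klass - base) >> shift; the helper below is purely illustrative and not part of the change.
// Illustrative model of cmp_klass's compare strategies.
static bool klass_matches(uint64_t klass_ptr, uint32_t narrow, uint64_t base, int shift) {
  if (base == 0) {
    // cmp(klass, tmp, LSL, shift): compare against the narrow value re-shifted in place.
    return klass_ptr == ((uint64_t)narrow << shift);
  }
  if ((base & 0xffffffff) == 0 && shift == 0) {
    // cmpw(klass, tmp): only the low 32 bits can differ.
    return (uint32_t)klass_ptr == narrow;
  }
  // General case: decode_klass_not_null(tmp), then a full 64-bit compare.
  return klass_ptr == base + ((uint64_t)narrow << shift);
}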
@ -5151,36 +5168,25 @@ void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Regi
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
cmpw(tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
cmp(tmp1, tmp2);
}
cmpw(tmp1, tmp2);
}
void MacroAssembler::store_klass(Register dst, Register src) {
// FIXME: Should this be a store release? concurrent gcs assumes
// klass length is valid if klass field is not null.
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(src);
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
}
void MacroAssembler::store_klass_gap(Register dst, Register src) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
// Store to klass gap in destination
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
}
// Algorithm must match CompressedOops::encode.
@ -5326,8 +5332,6 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
}
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
assert(UseCompressedClassPointers, "not using compressed class pointers");
// KlassDecodeMode shouldn't be set already.
assert(_klass_decode_mode == KlassDecodeNone, "set once");
@ -5393,7 +5397,7 @@ void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) {
}
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
if (AOTCodeCache::is_on_for_dump()) {
if (CompressedKlassPointers::base() != nullptr && AOTCodeCache::is_on_for_dump()) {
encode_klass_not_null_for_aot(dst, src);
return;
}
@ -5457,8 +5461,6 @@ void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) {
}
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
if (AOTCodeCache::is_on_for_dump()) {
decode_klass_not_null_for_aot(dst, src);
return;
@ -5525,7 +5527,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
@ -6835,6 +6836,9 @@ void MacroAssembler::spin_wait() {
assert(VM_Version::supports_sb(), "current CPU does not support SB instruction");
sb();
break;
case SpinWait::WFET:
spin_wait_wfet(VM_Version::spin_wait_desc().delay());
break;
default:
ShouldNotReachHere();
}
@ -6842,6 +6846,28 @@ void MacroAssembler::spin_wait() {
block_comment("}");
}
void MacroAssembler::spin_wait_wfet(int delay_ns) {
// The sequence assumes CNTFRQ_EL0 is fixed at 1 GHz. The assumption is valid
// starting from Armv8.6, according to section D12.1.2 "The system counter" of
// the Arm Architecture Reference Manual for A-profile architecture, version M.a.a.
// This is sufficient because FEAT_WFXT was introduced in Armv8.6.
Register target = rscratch1;
Register current = rscratch2;
get_cntvctss_el0(current);
add(target, current, delay_ns);
Label L_wait_loop;
bind(L_wait_loop);
wfet(target);
get_cntvctss_el0(current);
cmp(current, target);
br(LT, L_wait_loop);
sb();
}
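For context, a C-level sketch of the loop the sequence above emits; this path is only generated with -XX:OnSpinWaitInst=wfet (plus -XX:+UnlockDiagnosticVMOptions on release builds, and optionally -XX:OnSpinWaitDelay). The helper names below are stand-ins for the corresponding instructions, not real functions.
// Stand-ins: read_cntvctss_el0() ~ mrs CNTVCTSS_EL0, wfet_until() ~ WFET,
// speculation_barrier() ~ SB. Assumes the 1 GHz counter noted above, so one
// counter tick corresponds to one nanosecond.
static void spin_wait_wfet_model(uint64_t delay_ns) {
  uint64_t target = read_cntvctss_el0() + delay_ns;
  do {
    wfet_until(target);            // sleep until the timeout or an event
  } while (read_cntvctss_el0() < target);
  speculation_barrier();           // matches the trailing sb()
}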
// Stack frame creation/removal
void MacroAssembler::enter(bool strip_ret_addr) {

View File

@ -499,29 +499,20 @@ private:
void mov_immediate64(Register dst, uint64_t imm64);
void mov_immediate32(Register dst, uint32_t imm32);
int push(unsigned int bitset, Register stack);
int pop(unsigned int bitset, Register stack);
int push_fp(unsigned int bitset, Register stack, FpPushPopMode mode);
int pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode);
int push_p(unsigned int bitset, Register stack);
int pop_p(unsigned int bitset, Register stack);
void mov(Register dst, Address a);
public:
void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
int push(RegSet regset, Register stack);
int pop(RegSet regset, Register stack);
void push_fp(FloatRegSet regs, Register stack, FpPushPopMode mode = PushPopFull) { if (regs.bits()) push_fp(regs.bits(), stack, mode); }
void pop_fp(FloatRegSet regs, Register stack, FpPushPopMode mode = PushPopFull) { if (regs.bits()) pop_fp(regs.bits(), stack, mode); }
int push_fp(FloatRegSet regset, Register stack, FpPushPopMode mode = PushPopFull);
int pop_fp(FloatRegSet regset, Register stack, FpPushPopMode mode = PushPopFull);
static RegSet call_clobbered_gp_registers();
void push_p(PRegSet regs, Register stack) { if (regs.bits()) push_p(regs.bits(), stack); }
void pop_p(PRegSet regs, Register stack) { if (regs.bits()) pop_p(regs.bits(), stack); }
int push_p(PRegSet regset, Register stack);
int pop_p(PRegSet regset, Register stack);
// Push and pop everything that might be clobbered by a native
// runtime call except rscratch1 and rscratch2. (They are always
@ -660,6 +651,14 @@ public:
msr(0b011, 0b0100, 0b0010, 0b000, reg);
}
// CNTVCTSS_EL0: op1 == 011
// CRn == 1110
// CRm == 0000
// op2 == 110
inline void get_cntvctss_el0(Register reg) {
mrs(0b011, 0b1110, 0b0000, 0b110, reg);
}
// idiv variant which deals with MINLONG as dividend and -1 as divisor
int corrected_idivl(Register result, Register ra, Register rb,
bool want_remainder, Register tmp = rscratch1);
@ -891,10 +890,6 @@ public:
// thread in the default location (rthread)
void reset_last_Java_frame(bool clear_fp);
// Stores
void store_check(Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
void resolve_jobject(Register value, Register tmp1, Register tmp2);
void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
@ -1724,6 +1719,7 @@ public:
// Code for java.lang.Thread::onSpinWait() intrinsic.
void spin_wait();
void spin_wait_wfet(int delay_ns);
void fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow);
void fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);

View File

@ -97,7 +97,7 @@ protected:
#define MACOS_WX_WRITE MACOS_AARCH64_ONLY(os::thread_wx_enable_write())
void set_char_at(int offset, char c) { MACOS_WX_WRITE; *addr_at(offset) = (u_char)c; }
void set_int_at(int offset, jint i) { MACOS_WX_WRITE; *(jint*)addr_at(offset) = i; }
void set_uint_at(int offset, jint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; }
void set_uint_at(int offset, juint i) { MACOS_WX_WRITE; *(juint*)addr_at(offset) = i; }
void set_ptr_at(int offset, address ptr) { MACOS_WX_WRITE; *(address*)addr_at(offset) = ptr; }
void set_oop_at(int offset, oop o) { MACOS_WX_WRITE; *(oop*)addr_at(offset) = o; }
#undef MACOS_WX_WRITE
@ -178,13 +178,11 @@ public:
address destination() const;
void set_destination(address dest) {
int offset = dest - instruction_address();
unsigned int insn = 0b100101 << 26;
int64_t offset = dest - instruction_address();
juint insn = 0b100101u << 26u;
assert((offset & 3) == 0, "should be");
offset >>= 2;
offset &= (1 << 26) - 1; // mask off insn part
insn |= offset;
set_int_at(displacement_offset, insn);
Instruction_aarch64::spatch(reinterpret_cast<address>(&insn), 25, 0, offset >> 2);
set_uint_at(displacement_offset, insn);
}
void verify_alignment() { ; }

View File

@ -54,7 +54,12 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
bytes = MacroAssembler::pd_patch_instruction_size(addr(), x);
break;
}
ICache::invalidate_range(addr(), bytes);
if (UseSingleICacheInvalidation) {
assert(_binding != nullptr, "expect to be called with RelocIterator in use");
} else {
ICache::invalidate_range(addr(), bytes);
}
}
address Relocation::pd_call_destination(address orig_addr) {

View File

@ -290,7 +290,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
const char* name = OptoRuntime::stub_name(StubId::c2_exception_id);
CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)BlobId::c2_exception_id, name);
CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, BlobId::c2_exception_id);
if (blob != nullptr) {
return blob->as_exception_blob();
}

View File

@ -32,6 +32,7 @@ bool SpinWait::supports(const char *name) {
strcmp(name, "isb") == 0 ||
strcmp(name, "yield") == 0 ||
strcmp(name, "sb") == 0 ||
strcmp(name, "wfet") == 0 ||
strcmp(name, "none") == 0);
}
@ -46,6 +47,8 @@ SpinWait::Inst SpinWait::from_name(const char* name) {
return SpinWait::YIELD;
} else if (strcmp(name, "sb") == 0) {
return SpinWait::SB;
} else if (strcmp(name, "wfet") == 0) {
return SpinWait::WFET;
}
return SpinWait::NONE;

View File

@ -24,6 +24,8 @@
#ifndef CPU_AARCH64_SPIN_WAIT_AARCH64_HPP
#define CPU_AARCH64_SPIN_WAIT_AARCH64_HPP
#include "utilities/debug.hpp"
class SpinWait {
public:
enum Inst {
@ -31,21 +33,30 @@ public:
NOP,
ISB,
YIELD,
SB
SB,
WFET
};
private:
Inst _inst;
int _count;
int _delay;
Inst from_name(const char *name);
public:
SpinWait(Inst inst = NONE, int count = 0) : _inst(inst), _count(inst == NONE ? 0 : count) {}
SpinWait(const char *name, int count) : SpinWait(from_name(name), count) {}
SpinWait(Inst inst = NONE, int count = 0, int delay = -1)
: _inst(inst), _count(inst == NONE ? 0 : count), _delay(delay) {}
SpinWait(const char *name, int count, int delay)
: SpinWait(from_name(name), count, delay) {}
Inst inst() const { return _inst; }
int inst_count() const { return _count; }
int delay() const {
assert(_inst == WFET, "Specifying the delay value is only supported for WFET");
assert(_delay > 0, "The delay value must be positive");
return _delay;
}
static bool supports(const char *name);
};
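A minimal usage sketch, mirroring how get_spin_wait_desc() in vm_version (further down in this change) builds the descriptor from the diagnostic flags; nothing here goes beyond what that code shows.
// Build the descriptor from the flags; delay() is only legal for WFET.
static SpinWait make_spin_wait_desc_sketch() {
  SpinWait sw(OnSpinWaitInst, OnSpinWaitInstCount, OnSpinWaitDelay);
  if (sw.inst() == SpinWait::WFET) {
    int delay_ns = sw.delay();     // asserts unless inst == WFET and delay > 0
    (void)delay_ns;
  }
  return sw;
}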

View File

@ -29,32 +29,39 @@
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(preuniverse, 0) \
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(initial, 10000) \
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(continuation, 2000) \
// count needed for declaration of vector_iota_indices stub
#define VECTOR_IOTA_COUNT 6
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(compiler, 70000) \
do_stub(compiler, vector_iota_indices) \
do_arch_entry(aarch64, compiler, vector_iota_indices, \
vector_iota_indices, vector_iota_indices) \
do_arch_entry_array(aarch64, compiler, vector_iota_indices, \
vector_iota_indices, vector_iota_indices, \
VECTOR_IOTA_COUNT) \
do_stub(compiler, large_array_equals) \
do_arch_entry(aarch64, compiler, large_array_equals, \
large_array_equals, large_array_equals) \
@ -84,8 +91,7 @@
do_stub(compiler, count_positives) \
do_arch_entry(aarch64, compiler, count_positives, count_positives, \
count_positives) \
do_stub(compiler, count_positives_long) \
do_arch_entry(aarch64, compiler, count_positives_long, \
do_arch_entry(aarch64, compiler, count_positives, \
count_positives_long, count_positives_long) \
do_stub(compiler, compare_long_string_LL) \
do_arch_entry(aarch64, compiler, compare_long_string_LL, \
@ -108,14 +114,16 @@
do_stub(compiler, string_indexof_linear_ul) \
do_arch_entry(aarch64, compiler, string_indexof_linear_ul, \
string_indexof_linear_ul, string_indexof_linear_ul) \
/* this uses the entry for ghash_processBlocks */ \
do_stub(compiler, ghash_processBlocks_wide) \
do_stub(compiler, ghash_processBlocks_small) \
do_arch_entry(aarch64, compiler, ghash_processBlocks_small, \
ghash_processBlocks_small, ghash_processBlocks_small) \
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(final, 20000 ZGC_ONLY(+85000)) \
do_stub(final, copy_byte_f) \
do_arch_entry(aarch64, final, copy_byte_f, copy_byte_f, \
@ -139,9 +147,49 @@
do_stub(final, spin_wait) \
do_arch_entry_init(aarch64, final, spin_wait, spin_wait, \
spin_wait, empty_spin_wait) \
/* stub only -- entries are not stored in StubRoutines::aarch64 */ \
/* n.b. these are not the same as the generic atomic stubs */ \
do_stub(final, atomic_entry_points) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_fetch_add_4_impl, atomic_fetch_add_4_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_fetch_add_8_impl, atomic_fetch_add_8_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_fetch_add_4_relaxed_impl, \
atomic_fetch_add_4_relaxed_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_fetch_add_8_relaxed_impl, \
atomic_fetch_add_8_relaxed_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_xchg_4_impl, atomic_xchg_4_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_xchg_8_impl, atomic_xchg_8_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_1_impl, atomic_cmpxchg_1_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_4_impl, atomic_cmpxchg_4_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_8_impl, atomic_cmpxchg_8_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_1_relaxed_impl, \
atomic_cmpxchg_1_relaxed_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_4_relaxed_impl, \
atomic_cmpxchg_4_relaxed_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_8_relaxed_impl, \
atomic_cmpxchg_8_relaxed_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_4_release_impl, \
atomic_cmpxchg_4_release_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_8_release_impl, \
atomic_cmpxchg_8_release_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_4_seq_cst_impl, \
atomic_cmpxchg_4_seq_cst_impl) \
do_arch_entry(aarch64, final, atomic_entry_points, \
atomic_cmpxchg_8_seq_cst_impl, \
atomic_cmpxchg_8_seq_cst_impl) \
#endif // CPU_AARCH64_STUBDECLARATIONS_HPP

 File diff suppressed because it is too large

View File

@ -41,8 +41,12 @@ static void empty_spin_wait() { }
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
#define DEFINE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) [count];
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY)
#undef DEFINE_ARCH_ENTRY_ARRAY
#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY
@ -413,3 +417,36 @@ ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_pio2[] = {
2.73370053816464559624e-44, // 0x36E3822280000000
2.16741683877804819444e-51, // 0x3569F31D00000000
};
#if INCLUDE_CDS
extern void StubGenerator_init_AOTAddressTable(GrowableArray<address>& addresses);
void StubRoutines::init_AOTAddressTable() {
ResourceMark rm;
GrowableArray<address> external_addresses;
// publish static addresses referred to by aarch64 generator
// n.b. we have to use an extern call here because class
// StubGenerator, which provides the static method that knows how to
// add the relevant addresses, is declared in a source file rather
// than in a separately includeable header.
StubGenerator_init_AOTAddressTable(external_addresses);
// publish external data addresses defined in nested aarch64 class
StubRoutines::aarch64::init_AOTAddressTable(external_addresses);
AOTCodeCache::publish_external_addresses(external_addresses);
}
void StubRoutines::aarch64::init_AOTAddressTable(GrowableArray<address>& external_addresses) {
#define ADD(addr) external_addresses.append((address)(addr));
ADD(_kyberConsts);
ADD(_dilithiumConsts);
// this is added in generic code
// ADD(_crc_table);
ADD(_adler_table);
ADD(_npio2_hw);
ADD(_dsin_coef);
ADD(_dcos_coef);
ADD(_two_over_pi);
ADD(_pio2);
#undef ADD
}
#endif // INCLUDE_CDS

View File

@ -60,9 +60,13 @@ class aarch64 {
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
private:
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \
static address STUB_FIELD_NAME(field_name) [count];
private:
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY)
#undef DECLARE_ARCH_ENTRY_ARRAY
#undef DECLARE_ARCH_ENTRY_INIT
#undef DECLARE_ARCH_ENTRY
@ -78,8 +82,15 @@ private:
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \
static address getter_name(int idx) { \
assert(0 <= idx && idx < count, "entry array index out of range"); \
return STUB_FIELD_NAME(field_name) [idx]; \
}
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY)
#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
#undef DEFINE_ARCH_ENTRY_GETTER
@ -110,6 +121,11 @@ private:
_completed = true;
}
#if INCLUDE_CDS
static void init_AOTAddressTable(GrowableArray<address>& external_addresses);
#endif // INCLUDE_CDS
private:
static uint16_t _kyberConsts[];
static uint32_t _dilithiumConsts[];

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* Copyright 2025, 2026 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
*
*/
#include "logging/log.hpp"
#include "pauth_aarch64.hpp"
#include "register_aarch64.hpp"
#include "runtime/arguments.hpp"
@ -52,17 +53,56 @@ uintptr_t VM_Version::_pac_mask;
SpinWait VM_Version::_spin_wait;
bool VM_Version::_cache_dic_enabled;
bool VM_Version::_cache_idc_enabled;
bool VM_Version::_ic_ivau_trapped;
const char* VM_Version::_features_names[MAX_CPU_FEATURES] = { nullptr };
static SpinWait get_spin_wait_desc() {
SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount);
SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount, OnSpinWaitDelay);
if (spin_wait.inst() == SpinWait::SB && !VM_Version::supports_sb()) {
vm_exit_during_initialization("OnSpinWaitInst is SB but current CPU does not support SB instruction");
}
if (spin_wait.inst() == SpinWait::WFET) {
if (!VM_Version::supports_wfxt()) {
vm_exit_during_initialization("OnSpinWaitInst is WFET but the CPU does not support the WFET instruction");
}
if (!VM_Version::supports_ecv()) {
vm_exit_during_initialization("The CPU does not support the FEAT_ECV required by the -XX:OnSpinWaitInst=wfet implementation");
}
if (!VM_Version::supports_sb()) {
vm_exit_during_initialization("The CPU does not support the SB instruction required by the -XX:OnSpinWaitInst=wfet implementation");
}
if (OnSpinWaitInstCount != 1) {
vm_exit_during_initialization("OnSpinWaitInstCount for OnSpinWaitInst 'wfet' must be 1");
}
} else {
if (!FLAG_IS_DEFAULT(OnSpinWaitDelay)) {
vm_exit_during_initialization("OnSpinWaitDelay can only be used with -XX:OnSpinWaitInst=wfet");
}
}
return spin_wait;
}
static bool has_neoverse_n1_errata_1542419() {
const int major_rev_num = VM_Version::cpu_variant();
const int minor_rev_num = VM_Version::cpu_revision();
// Neoverse N1: 0xd0c
// Erratum 1542419 affects r3p0, r3p1 and r4p0.
// It is fixed in r4p1 and later revisions, which are not affected.
return (VM_Version::cpu_family() == VM_Version::CPU_ARM &&
VM_Version::model_is(0xd0c) &&
((major_rev_num == 3 && minor_rev_num == 0) ||
(major_rev_num == 3 && minor_rev_num == 1) ||
(major_rev_num == 4 && minor_rev_num == 0)));
}
void VM_Version::initialize() {
#define SET_CPU_FEATURE_NAME(id, name, bit) \
_features_names[bit] = XSTR(name);
@ -74,9 +114,14 @@ void VM_Version::initialize() {
_supports_atomic_getset8 = true;
_supports_atomic_getadd8 = true;
get_os_cpu_info();
_cache_dic_enabled = false;
_cache_idc_enabled = false;
_ic_ivau_trapped = false;
int dcache_line = VM_Version::dcache_line_size();
get_os_cpu_info();
_cpu_features = _features;
int dcache_line = dcache_line_size();
// Limit AllocatePrefetchDistance so that it does not exceed the
// static constraint of 512 defined in runtime/globals.hpp.
@ -124,7 +169,7 @@ void VM_Version::initialize() {
// if dcpop is available publish data cache line flush size via
// generic field, otherwise let it default to zero thereby
// disabling writeback
if (VM_Version::supports_dcpop()) {
if (supports_dcpop()) {
_data_cache_line_flush_size = dcache_line;
}
}
@ -245,14 +290,24 @@ void VM_Version::initialize() {
}
}
if (FLAG_IS_DEFAULT(UseCRC32)) {
UseCRC32 = VM_Version::supports_crc32();
if (supports_sha1() || supports_sha256() ||
supports_sha3() || supports_sha512()) {
if (FLAG_IS_DEFAULT(UseSHA)) {
FLAG_SET_DEFAULT(UseSHA, true);
} else if (!UseSHA) {
clear_feature(CPU_SHA1);
clear_feature(CPU_SHA2);
clear_feature(CPU_SHA3);
clear_feature(CPU_SHA512);
}
} else if (UseSHA) {
warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseCRC32 && !VM_Version::supports_crc32()) {
warning("UseCRC32 specified, but not supported on this CPU");
FLAG_SET_DEFAULT(UseCRC32, false);
}
CHECK_CPU_FEATURE(supports_crc32, CRC32);
CHECK_CPU_FEATURE(supports_lse, LSE);
CHECK_CPU_FEATURE(supports_aes, AES);
if (_cpu == CPU_ARM &&
model_is_in({ CPU_MODEL_ARM_NEOVERSE_V1, CPU_MODEL_ARM_NEOVERSE_V2,
@ -265,7 +320,7 @@ void VM_Version::initialize() {
}
}
if (UseCryptoPmullForCRC32 && (!VM_Version::supports_pmull() || !VM_Version::supports_sha3() || !VM_Version::supports_crc32())) {
if (UseCryptoPmullForCRC32 && (!supports_pmull() || !supports_sha3() || !supports_crc32())) {
warning("UseCryptoPmullForCRC32 specified, but not supported on this CPU");
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, false);
}
@ -279,48 +334,40 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
}
if (VM_Version::supports_lse()) {
if (FLAG_IS_DEFAULT(UseLSE))
FLAG_SET_DEFAULT(UseLSE, true);
} else {
if (UseLSE) {
warning("UseLSE specified, but not supported on this CPU");
FLAG_SET_DEFAULT(UseLSE, false);
}
}
if (VM_Version::supports_aes()) {
UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
UseAESIntrinsics =
UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
if (UseAESIntrinsics && !UseAES) {
warning("UseAESIntrinsics enabled, but UseAES not, enabling");
UseAES = true;
if (supports_aes()) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
}
} else {
if (UseAES) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
if (!UseAES) {
if (UseAESIntrinsics) {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
} else if (!cpu_supports_aes()) {
if (UseAESIntrinsics) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
}
}
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
UseCRC32Intrinsics = true;
}
if (VM_Version::supports_crc32()) {
if (supports_crc32()) {
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
@ -337,17 +384,7 @@ void VM_Version::initialize() {
UseMD5Intrinsics = true;
}
if (VM_Version::supports_sha1() || VM_Version::supports_sha256() ||
VM_Version::supports_sha3() || VM_Version::supports_sha512()) {
if (FLAG_IS_DEFAULT(UseSHA)) {
FLAG_SET_DEFAULT(UseSHA, true);
}
} else if (UseSHA) {
warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseSHA && VM_Version::supports_sha1()) {
if (UseSHA && supports_sha1()) {
if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
}
@ -356,7 +393,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
}
if (UseSHA && VM_Version::supports_sha256()) {
if (UseSHA && supports_sha256()) {
if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
}
@ -366,7 +403,7 @@ void VM_Version::initialize() {
}
if (UseSHA) {
// No need to check VM_Version::supports_sha3(), since a fallback GPR intrinsic implementation is provided.
// No need to check supports_sha3(), since a fallback GPR intrinsic implementation is provided.
if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
}
@ -376,7 +413,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
}
if (UseSHA3Intrinsics && VM_Version::supports_sha3()) {
if (UseSHA3Intrinsics && supports_sha3()) {
// Auto-enable UseSIMDForSHA3Intrinsic on hardware with performance benefit.
// Note that the evaluation of SHA3 extension intrinsics shows better performance
// on Apple and Qualcomm silicon but worse performance on Neoverse V1 and N2.
@ -386,12 +423,12 @@ void VM_Version::initialize() {
}
}
}
if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic && !VM_Version::supports_sha3()) {
if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic && !supports_sha3()) {
warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
}
if (UseSHA && VM_Version::supports_sha512()) {
if (UseSHA && supports_sha512()) {
if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
}
@ -400,11 +437,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA3Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}
if (VM_Version::supports_pmull()) {
if (supports_pmull()) {
if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
}
@ -455,7 +488,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseBlockZeroing, true);
}
if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * zva_length());
}
} else if (UseBlockZeroing) {
if (!FLAG_IS_DEFAULT(UseBlockZeroing)) {
@ -464,11 +497,11 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseBlockZeroing, false);
}
if (VM_Version::supports_sve2()) {
if (supports_sve2()) {
if (FLAG_IS_DEFAULT(UseSVE)) {
FLAG_SET_DEFAULT(UseSVE, 2);
}
} else if (VM_Version::supports_sve()) {
} else if (supports_sve()) {
if (FLAG_IS_DEFAULT(UseSVE)) {
FLAG_SET_DEFAULT(UseSVE, 1);
} else if (UseSVE > 1) {
@ -519,7 +552,7 @@ void VM_Version::initialize() {
// 1) this code has been built with branch-protection and
// 2) the CPU/OS supports it
#ifdef __ARM_FEATURE_PAC_DEFAULT
if (!VM_Version::supports_paca()) {
if (!supports_paca()) {
// Disable PAC to prevent illegal instruction crashes.
warning("ROP-protection specified, but not supported on this CPU. Disabling ROP-protection.");
} else {
@ -661,6 +694,43 @@ void VM_Version::initialize() {
clear_feature(CPU_SVE);
}
if (FLAG_IS_DEFAULT(UseSingleICacheInvalidation) && is_cache_idc_enabled() && is_cache_dic_enabled()) {
FLAG_SET_DEFAULT(UseSingleICacheInvalidation, true);
}
if (FLAG_IS_DEFAULT(NeoverseN1ICacheErratumMitigation) && has_neoverse_n1_errata_1542419()
&& is_cache_idc_enabled() && !is_cache_dic_enabled()) {
if (_ic_ivau_trapped) {
FLAG_SET_DEFAULT(NeoverseN1ICacheErratumMitigation, true);
} else {
log_info(os)("IC IVAU is not trapped; disabling NeoverseN1ICacheErratumMitigation");
FLAG_SET_DEFAULT(NeoverseN1ICacheErratumMitigation, false);
}
}
if (NeoverseN1ICacheErratumMitigation) {
if (!has_neoverse_n1_errata_1542419()) {
vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set for the CPU not having Neoverse N1 errata 1542419");
}
// If the user explicitly set the flag, verify the trap is active.
if (!FLAG_IS_DEFAULT(NeoverseN1ICacheErratumMitigation) && !_ic_ivau_trapped) {
vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set but IC IVAU is not trapped. "
"The optimization is not safe on this system.");
}
if (FLAG_IS_DEFAULT(UseSingleICacheInvalidation)) {
FLAG_SET_DEFAULT(UseSingleICacheInvalidation, true);
}
if (!UseSingleICacheInvalidation) {
vm_exit_during_initialization("NeoverseN1ICacheErratumMitigation is set but UseSingleICacheInvalidation is not enabled");
}
}
if (UseSingleICacheInvalidation
&& (!is_cache_idc_enabled() || (!is_cache_dic_enabled() && !NeoverseN1ICacheErratumMitigation))) {
vm_exit_during_initialization("UseSingleICacheInvalidation is set but neither IDC nor DIC nor NeoverseN1ICacheErratumMitigation is enabled");
}
// Construct the "features" string
stringStream ss(512);
ss.print("0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
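Earlier in this file's diff, the per-flag boilerplate for CRC32, LSE and AES (warn if the flag is set on unsupported hardware, then reset it) is collapsed into CHECK_CPU_FEATURE(supports_xxx, XXX) calls. The macro definition is not part of the excerpt; a minimal sketch of one plausible definition, assuming it simply pairs the supports_xxx() query with the matching Use<XXX> flag:

#define CHECK_CPU_FEATURE(check, FEATURE)                                  \
  do {                                                                     \
    if (Use##FEATURE && !VM_Version::check()) {                            \
      warning("Use" #FEATURE " specified, but not supported on this CPU"); \
      FLAG_SET_DEFAULT(Use##FEATURE, false);                               \
    }                                                                      \
  } while (0)

Used as CHECK_CPU_FEATURE(supports_crc32, CRC32), this expands to the same check/warn/reset sequence the old hand-written blocks contained.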


@ -58,6 +58,12 @@ protected:
// When _prefer_sve_merging_mode_cpy is true, `cpy (imm, zeroing)` is
// implemented as `movi; cpy(imm, merging)`.
static constexpr bool _prefer_sve_merging_mode_cpy = true;
static bool _cache_dic_enabled;
static bool _cache_idc_enabled;
// IC IVAU trap probe for Neoverse N1 erratum 1542419.
// Set by get_os_cpu_info() on Linux via ic_ivau_probe_linux_aarch64.S.
static bool _ic_ivau_trapped;
static SpinWait _spin_wait;
@ -159,7 +165,9 @@ public:
/* flags above must follow Linux HWCAP */ \
decl(SVEBITPERM, svebitperm, 27) \
decl(SVE2, sve2, 28) \
decl(A53MAC, a53mac, 31)
decl(A53MAC, a53mac, 31) \
decl(ECV, ecv, 32) \
decl(WFXT, wfxt, 33)
enum Feature_Flag {
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = bit,
@ -191,6 +199,8 @@ public:
return (features & BIT_MASK(flag)) != 0;
}
static bool cpu_supports_aes() { return supports_feature(_cpu_features, CPU_AES); }
static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; }
static int cpu_model2() { return _model2; }
@ -253,6 +263,10 @@ public:
return vector_length_in_bytes <= 16;
}
static bool is_cache_dic_enabled() { return _cache_dic_enabled; }
static bool is_cache_idc_enabled() { return _cache_idc_enabled; }
static bool is_ic_ivau_trapped() { return _ic_ivau_trapped; }
static void get_cpu_features_name(void* features_buffer, stringStream& ss);
// Returns names of features present in features_set1 but not in features_set2
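The ECV (bit 32) and WFXT (bit 33) entries added above are the first feature flags past bit 31, so the supports_feature() test only works if the feature word and BIT_MASK are 64-bit. A self-contained illustration of the arithmetic (plain C++, values are hypothetical):

#include <cstdint>

int main() {
  uint64_t features = (1ULL << 32) | (1ULL << 33);  // CPU_ECV | CPU_WFXT
  bool has_ecv  = (features & (1ULL << 32)) != 0;   // true
  bool has_wfxt = (features & (1ULL << 33)) != 0;   // true
  // A 32-bit mask such as (1U << 32) would be undefined behaviour and the
  // tests above would silently fail.
  return (has_ecv && has_wfxt) ? 0 : 1;
}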


@ -1332,7 +1332,8 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
load_addr = address;
}
__ volatile_load_mem_reg(load_addr, result, info);
return;
} else {
__ load(address, result, info, lir_patch_none);
}
__ load(address, result, info, lir_patch_none);
__ membar_acquire();
}
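With this change the volatile_load_mem_reg path no longer returns early, so both branches reach membar_acquire(). Conceptually the pair is a load-acquire: the field load followed by a barrier that keeps later memory accesses from moving ahead of it. A rough runtime-level sketch of that ordering (not the emitted ARM code; it assumes HotSpot's OrderAccess API):

#include <cstdint>
#include "runtime/orderAccess.hpp"

static int64_t volatile_load_sketch(const volatile int64_t* field) {
  int64_t v = *field;        // the volatile field load
  OrderAccess::acquire();    // LoadLoad | LoadStore: later accesses stay after the load
  return v;
}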


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1210,7 +1210,7 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) {
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver) {
assert_different_registers(mdp, receiver, Rtemp);
if (ProfileInterpreter) {
@ -1219,19 +1219,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register rece
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
cbnz(receiver, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, Rtemp, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -239,8 +239,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp); // Sets mdp, blows Rtemp.
void profile_final_call(Register mdp); // Sets mdp, blows Rtemp.
void profile_virtual_call(Register mdp, Register receiver, // Sets mdp, blows Rtemp.
bool receiver_can_be_null = false);
void profile_virtual_call(Register mdp, Register receiver); // Sets mdp, blows Rtemp.
void profile_ret(Register mdp, Register return_bci); // Sets mdp, blows R0-R3/R0-R18, Rtemp, LR
void profile_null_seen(Register mdp); // Sets mdp.
void profile_typecheck(Register mdp, Register klass); // Sets mdp, blows Rtemp.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@
static bool narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}


@ -104,14 +104,13 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
__ andr(temp, temp, (unsigned)java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
__ cmp(temp, ref_kind);
__ b(L, eq);
{ char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
const char* msg = ref_kind_to_verify_msg(ref_kind);
if (ref_kind == JVM_REF_invokeVirtual ||
ref_kind == JVM_REF_invokeSpecial)
ref_kind == JVM_REF_invokeSpecial) {
// could do this for all ref_kinds, but would explode assembly code size
trace_method_handle(_masm, buf);
__ stop(buf);
trace_method_handle(_masm, msg);
}
__ stop(msg);
BLOCK_COMMENT("} verify_ref_kind");
__ bind(L);
}
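Both this Arm version of verify_ref_kind and the PPC version later in the diff now call a shared ref_kind_to_verify_msg() helper instead of formatting a message into a C-heap buffer on every call. The helper's definition is not shown; a plausible shape, assuming it maps each JVM_REF_* constant to a static string (the message wording below is illustrative):

static const char* ref_kind_to_verify_msg(int ref_kind) {
  switch (ref_kind) {
    case JVM_REF_getField:         return "verify_ref_kind expected getField";
    case JVM_REF_getStatic:        return "verify_ref_kind expected getStatic";
    case JVM_REF_putField:         return "verify_ref_kind expected putField";
    case JVM_REF_putStatic:        return "verify_ref_kind expected putStatic";
    case JVM_REF_invokeVirtual:    return "verify_ref_kind expected invokeVirtual";
    case JVM_REF_invokeStatic:     return "verify_ref_kind expected invokeStatic";
    case JVM_REF_invokeSpecial:    return "verify_ref_kind expected invokeSpecial";
    case JVM_REF_newInvokeSpecial: return "verify_ref_kind expected newInvokeSpecial";
    case JVM_REF_invokeInterface:  return "verify_ref_kind expected invokeInterface";
    default:                       return "verify_ref_kind: unexpected ref_kind";
  }
}

Because the message has to outlive code generation (the generated stop() references it at run time), a table of static strings is a natural fit here.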


@ -29,7 +29,8 @@
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(preuniverse, 500) \
do_stub(preuniverse, atomic_load_long) \
do_arch_entry(Arm, preuniverse, atomic_load_long, \
@ -42,7 +43,8 @@
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(initial, 9000) \
do_stub(initial, idiv_irem) \
do_arch_entry(Arm, initial, idiv_irem, \
@ -51,14 +53,16 @@
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(continuation, 2000) \
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(compiler, 22000) \
do_stub(compiler, partial_subtype_check) \
do_arch_entry(Arm, compiler, partial_subtype_check, \
@ -68,7 +72,8 @@
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(final, 22000) \


@ -3211,7 +3211,7 @@ class StubGenerator: public StubCodeGenerator {
}
public:
StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) {
StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) : StubCodeGenerator(code, blob_id, stub_data) {
switch(blob_id) {
case BlobId::stubgen_preuniverse_id:
generate_preuniverse_stubs();
@ -3235,8 +3235,8 @@ class StubGenerator: public StubCodeGenerator {
}
}; // end class declaration
void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) {
StubGenerator g(code, blob_id);
void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData* stub_data) {
StubGenerator g(code, blob_id, stub_data);
}
// implementation of internal development flag


@ -32,10 +32,16 @@
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT, DEFINE_ARCH_ENTRY_ARRAY)
#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY
address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; }
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }
#if INCLUDE_CDS
// nothing to do for arm
void StubRoutines::init_AOTAddressTable() {
}
#endif // INCLUDE_CDS


@ -55,9 +55,13 @@ class Arm {
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
private:
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
#define DECLARE_ARCH_ENTRY_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \
static address STUB_FIELD_NAME(field_name) [count] ;
private:
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT, DECLARE_ARCH_ENTRY_ARRAY)
#undef DECLARE_ARCH_ENTRY_ARRAY
#undef DECLARE_ARCH_ENTRY_INIT
#undef DECLARE_ARCH_ENTRY
@ -71,8 +75,12 @@ public:
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
#define DEFINE_ARCH_ENTRY_GETTER_ARRAY(arch, blob_name, stub_name, field_name, getter_name, count) \
static address getter_name(int idx) { return STUB_FIELD_NAME(field_name) [idx] ; }
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT, DEFINE_ARCH_ENTRY_GETTER_ARRAY)
#undef DEFINE_ARCH_ENTRY_GETTER_ARRAY
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
#undef DEFINE_ARCH_ENTRY_GETTER
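The new *_ARRAY macro hooks let an architecture declare a stub entry as a fixed-size array of addresses with an indexed getter, mirroring the scalar DECLARE/DEFINE pair above. Expanded by hand for a hypothetical entry (the names crc_table/_crc_table/crc_table_entry and the count are illustrative, not from the patch, and STUB_FIELD_NAME is assumed to prefix an underscore):

typedef unsigned char* address;   // stand-in for HotSpot's address type

struct ArmStubEntryArraySketch {
  // DECLARE_ARCH_ENTRY_ARRAY(Arm, compiler, crc_table, crc_table, crc_table_entry, 4)
  static address _crc_table[4];
  // DEFINE_ARCH_ENTRY_GETTER_ARRAY(...) generates the matching indexed getter:
  static address crc_table_entry(int idx) { return _crc_table[idx]; }
};
address ArmStubEntryArraySketch::_crc_table[4] = { nullptr, nullptr, nullptr, nullptr };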


@ -1143,6 +1143,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
Unimplemented();
// __ volatile_load_mem_reg(address, result, info);
#endif
__ membar_acquire();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -144,7 +144,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (len->is_valid()) {
stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
// Otherwise length is in the class gap.
store_klass_gap(obj);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -258,7 +258,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register scratch1, Register scratch2);
void profile_call(Register scratch1, Register scratch2);
void profile_final_call(Register scratch1, Register scratch2);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2);
void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
void profile_switch_default(Register scratch1, Register scratch2);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -1340,28 +1340,15 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register s
// Count a virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
Register Rscratch1,
Register Rscratch2,
bool receiver_can_be_null) {
Register Rscratch2) {
if (!ProfileInterpreter) { return; }
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
cmpdi(CR0, Rreceiver, 0);
bne(CR0, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
b(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));


@ -3201,23 +3201,17 @@ Register MacroAssembler::encode_klass_not_null(Register dst, Register src) {
void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
Register compressedKlass = encode_klass_not_null(ck, klass);
stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
} else {
std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
}
Register compressedKlass = encode_klass_not_null(ck, klass);
stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
}
void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
assert(!UseCompactObjectHeaders, "not with compact headers");
if (UseCompressedClassPointers) {
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop);
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop);
}
int MacroAssembler::instr_size_for_decode_klass_not_null() {
@ -3226,17 +3220,13 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
// Not yet computed?
if (computed_size == -1) {
if (!UseCompressedClassPointers) {
computed_size = 0;
} else {
// Determine by scratch emit.
ResourceMark rm;
int code_size = 8 * BytesPerInstWord;
CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0);
MacroAssembler* a = new MacroAssembler(&cb);
a->decode_klass_not_null(R11_scratch1);
computed_size = a->offset();
}
// Determine by scratch emit.
ResourceMark rm;
int code_size = 8 * BytesPerInstWord;
CodeBuffer cb("decode_klass_not_null scratch buffer", code_size, 0);
MacroAssembler* a = new MacroAssembler(&cb);
a->decode_klass_not_null(R11_scratch1);
computed_size = a->offset();
}
return computed_size;
@ -3259,18 +3249,14 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
void MacroAssembler::load_klass_no_decode(Register dst, Register src) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(dst, src);
} else if (UseCompressedClassPointers) {
lwz(dst, oopDesc::klass_offset_in_bytes(), src);
} else {
ld(dst, oopDesc::klass_offset_in_bytes(), src);
lwz(dst, oopDesc::klass_offset_in_bytes(), src);
}
}
void MacroAssembler::load_klass(Register dst, Register src) {
load_klass_no_decode(dst, src);
if (UseCompressedClassPointers) { // also true for UseCompactObjectHeaders
decode_klass_not_null(dst);
}
decode_klass_not_null(dst);
}
// Loads the obj's Klass* into dst.
@ -3286,18 +3272,13 @@ void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
void MacroAssembler::cmp_klass(ConditionRegister dst, Register obj, Register klass, Register tmp, Register tmp2) {
assert_different_registers(obj, klass, tmp);
if (UseCompressedClassPointers) {
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
lwz(tmp, oopDesc::klass_offset_in_bytes(), obj);
}
Register encoded_klass = encode_klass_not_null(tmp2, klass);
cmpw(dst, tmp, encoded_klass);
if (UseCompactObjectHeaders) {
load_narrow_klass_compact(tmp, obj);
} else {
ld(tmp, oopDesc::klass_offset_in_bytes(), obj);
cmpd(dst, tmp, klass);
lwz(tmp, oopDesc::klass_offset_in_bytes(), obj);
}
Register encoded_klass = encode_klass_not_null(tmp2, klass);
cmpw(dst, tmp, encoded_klass);
}
void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register obj1, Register obj2, Register tmp1, Register tmp2) {
@ -3305,14 +3286,10 @@ void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register ob
load_narrow_klass_compact(tmp1, obj1);
load_narrow_klass_compact(tmp2, obj2);
cmpw(dst, tmp1, tmp2);
} else if (UseCompressedClassPointers) {
} else {
lwz(tmp1, oopDesc::klass_offset_in_bytes(), obj1);
lwz(tmp2, oopDesc::klass_offset_in_bytes(), obj2);
cmpw(dst, tmp1, tmp2);
} else {
ld(tmp1, oopDesc::klass_offset_in_bytes(), obj1);
ld(tmp2, oopDesc::klass_offset_in_bytes(), obj2);
cmpd(dst, tmp1, tmp2);
}
}
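The PPC klass helpers above now work exclusively with compressed (narrow) klass pointers, the uncompressed branches having been deleted. The compression that encode_klass_not_null()/decode_klass_not_null() implement is, conceptually, a base-and-shift encoding; the sketch below shows only the arithmetic (the real base and shift come from CompressedKlassPointers, and the PPC code emits instructions rather than calling such functions):

#include <cstdint>

typedef uint32_t narrowKlass;

static narrowKlass encode_klass_sketch(uintptr_t klass, uintptr_t base, int shift) {
  return (narrowKlass)((klass - base) >> shift);   // what stw of the encoded klass stores
}

static uintptr_t decode_klass_sketch(narrowKlass nk, uintptr_t base, int shift) {
  return base + ((uintptr_t)nk << shift);          // what load_klass reconstructs
}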


@ -87,7 +87,6 @@
static bool narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
// TODO: PPC port if (MatchDecodeNodes) return true;
return false;
}


@ -104,14 +104,13 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
__ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
__ cmpwi(CR1, temp, ref_kind);
__ beq(CR1, L);
{ char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
if (ref_kind == JVM_REF_invokeVirtual ||
ref_kind == JVM_REF_invokeSpecial)
// could do this for all ref_kinds, but would explode assembly code size
trace_method_handle(_masm, buf);
__ stop(buf);
const char* msg = ref_kind_to_verify_msg(ref_kind);
if (ref_kind == JVM_REF_invokeVirtual ||
ref_kind == JVM_REF_invokeSpecial) {
// could do this for all ref_kinds, but would explode assembly code size
trace_method_handle(_masm, msg);
}
__ stop(msg);
BLOCK_COMMENT("} verify_ref_kind");
__ BIND(L);
}


@ -0,0 +1,46 @@
/*
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "runtime/registerMap.hpp"
address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
if (base_reg->is_VectorRegister()) {
// Not all physical slots belonging to a VectorRegister have corresponding
// valid VMReg locations in the RegisterMap.
// (See RegisterSaver::push_frame_reg_args_and_save_live_registers.)
// However, the slots are always saved to the stack in a contiguous region
// of memory so we can calculate the address of the upper slots by
// offsetting from the base address.
assert(base_reg->is_concrete(), "must pass base reg");
address base_location = location(base_reg, nullptr);
if (base_location != nullptr) {
intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
return base_location + offset_in_bytes;
} else {
return nullptr;
}
} else {
return location(base_reg->next(slot_idx), nullptr);
}
}
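HotSpot stack slots are 4 bytes (VMRegImpl::stack_slot_size), so the vector branch simply offsets from the address recorded for the register's first slot. A worked example of that arithmetic (standalone sketch, not the RegisterMap API):

#include <cstdint>

static uint8_t* vector_slot_address(uint8_t* base_location, int slot_idx) {
  const intptr_t stack_slot_size = 4;               // VMRegImpl::stack_slot_size
  return base_location + slot_idx * stack_slot_size;
}
// vector_slot_address(base, 3) == base + 12, the fourth 32-bit slot of the
// contiguously saved vector register.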


@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,9 +35,7 @@
// Since there is none, we just return null.
address pd_location(VMReg reg) const { return nullptr; }
address pd_location(VMReg base_reg, int slot_idx) const {
return location(base_reg->next(slot_idx), nullptr);
}
address pd_location(VMReg base_reg, int slot_idx) const;
// no PD state to clear or copy:
void pd_clear() {}


@ -102,7 +102,7 @@ class RegisterSaver {
// During deoptimization only the result registers need to be restored
// all the other values have already been extracted.
static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);
static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors);
// Constants and data structures:
@ -349,7 +349,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
}
// Note that generate_oop_map in the following loop is only used for the
// polling_page_vectors_safepoint_handler_blob.
// polling_page_vectors_safepoint_handler_blob and the deopt_blob.
// The order in which the vector contents are stored depends on Endianness and
// the utilized instructions (PowerArchitecturePPC64).
assert(is_aligned(offset, StackAlignmentInBytes), "should be");
@ -361,6 +361,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
__ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
// Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad).
// RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
if (generate_oop_map) {
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
@ -380,6 +381,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
__ stxvd2x(as_VectorRegister(reg_num)->to_vsr(), R31, R1_SP);
}
// Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad).
// RegisterMap::pd_location only uses the first VMReg for each VectorRegister.
if (generate_oop_map) {
VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
@ -566,10 +568,14 @@ void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm
}
// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes, bool save_vectors) {
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size; // VS registers not relevant here.
const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
sizeof(RegisterSaver::LiveRegType))
: 0;
const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
const int register_save_offset = frame_size_in_bytes - register_save_size;
// restore all result registers (ints and floats)
@ -598,7 +604,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_siz
offset += reg_size;
}
assert(offset == frame_size_in_bytes, "consistency check");
assert(offset == frame_size_in_bytes - (save_vectors ? vecregstosave_num * vec_reg_size : 0), "consistency check");
}
// Is vector's size (in bytes) bigger than a size saved by default?
@ -2909,7 +2915,8 @@ void SharedRuntime::generate_deopt_blob() {
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
RegisterSaver::return_pc_is_lr);
RegisterSaver::return_pc_is_lr,
/*save_vectors*/ SuperwordUseVSX);
assert(map != nullptr, "OopMap must have been created");
__ li(exec_mode_reg, Deoptimization::Unpack_deopt);
@ -2943,7 +2950,8 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
RegisterSaver::return_pc_is_pre_saved);
RegisterSaver::return_pc_is_pre_saved,
/*save_vectors*/ SuperwordUseVSX);
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
@ -2958,7 +2966,8 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
RegisterSaver::return_pc_is_pre_saved);
RegisterSaver::return_pc_is_pre_saved,
/*save_vectors*/ SuperwordUseVSX);
__ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
#endif
@ -2984,7 +2993,7 @@ void SharedRuntime::generate_deopt_blob() {
// Restore only the result registers that have been saved
// by save_volatile_registers(...).
RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes, /*save_vectors*/ SuperwordUseVSX);
// reload the exec mode from the UnrollBlock (it might have changed)
__ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);


@ -29,35 +29,40 @@
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(preuniverse, 0) \
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(initial, 20000) \
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(continuation, 2000) \
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(compiler, 24000) \
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_entry_init, \
do_arch_entry_array) \
do_arch_blob(final, 24000) \


@ -5095,7 +5095,7 @@ void generate_lookup_secondary_supers_table_stub() {
}
public:
StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) {
StubGenerator(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) : StubCodeGenerator(code, blob_id, stub_data) {
switch(blob_id) {
case BlobId::stubgen_preuniverse_id:
generate_preuniverse_stubs();
@ -5119,7 +5119,7 @@ void generate_lookup_secondary_supers_table_stub() {
}
};
void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) {
StubGenerator g(code, blob_id);
void StubGenerator_generate(CodeBuffer* code, BlobId blob_id, AOTStubData *stub_data) {
StubGenerator g(code, blob_id, stub_data);
}


@ -183,3 +183,9 @@ address StubRoutines::ppc::generate_crc_constants(juint reverse_poly) {
return consts;
}
#if INCLUDE_CDS
// nothing to do for ppc
void StubRoutines::init_AOTAddressTable() {
}
#endif // INCLUDE_CDS


@ -3489,7 +3489,7 @@ void TemplateTable::invokevirtual(int byte_no) {
// Get receiver klass.
__ load_klass_check_null_throw(Rrecv_klass, Rrecv, R11_scratch1);
__ verify_klass_ptr(Rrecv_klass);
__ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
__ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2);
generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}
@ -3596,7 +3596,7 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
// Non-final call case.
__ bind(LnotFinal);
__ lhz(Rindex, in_bytes(ResolvedMethodEntry::table_index_offset()), Rcache);
__ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
__ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch);
generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}
@ -3664,7 +3664,7 @@ void TemplateTable::invokeinterface(int byte_no) {
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
L_no_such_interface, /*return_method=*/false);
__ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
__ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2);
// Find entry point to call.


@ -25,6 +25,7 @@
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/disassembler.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
@ -105,7 +106,7 @@ void VM_Version::initialize() {
if (PowerArchitecturePPC64 >= 9) {
// Performance is good since Power9.
if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
if (FLAG_IS_DEFAULT(SuperwordUseVSX) && CompilerConfig::is_c2_enabled()) {
FLAG_SET_ERGO(SuperwordUseVSX, true);
}
}
@ -310,11 +311,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
}
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
UseSquareToLenIntrinsic = true;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -196,12 +196,9 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
if (UseCompactObjectHeaders) {
__ load_narrow_klass_compact(tmp, src);
__ load_narrow_klass_compact(t0, dst);
} else if (UseCompressedClassPointers) {
} else {
__ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
} else {
__ ld(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
}
__ bne(tmp, t0, *stub->entry(), /* is_far */ true);
} else {
@ -243,37 +240,6 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
}
}
void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) {
assert(default_type != nullptr, "null default_type!");
BasicType basic_type = default_type->element_type()->basic_type();
if (basic_type == T_ARRAY) { basic_type = T_OBJECT; }
if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
// Sanity check the known type with the incoming class. For the
// primitive case the types must match exactly with src.klass and
// dst.klass each exactly matching the default type. For the
// object array case, if no type check is needed then either the
// dst type is exactly the expected type and the src type is a
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
}
if (basic_type != T_OBJECT) {
__ cmp_klass_compressed(dst, tmp, t0, halt, false);
__ cmp_klass_compressed(src, tmp, t0, known_ok, true);
} else {
__ cmp_klass_compressed(dst, tmp, t0, known_ok, true);
__ beq(src, dst, known_ok);
}
__ bind(halt);
__ stop("incorrect type information in arraycopy");
__ bind(known_ok);
}
}
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
ciArrayKlass *default_type = op->expected_type();
Register src = op->src()->as_register();
@ -304,7 +270,28 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
#ifdef ASSERT
arraycopy_assert(src, dst, tmp, default_type, flags);
if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
// Sanity check the known type with the incoming class. For the
// primitive case the types must match exactly with src.klass and
// dst.klass each exactly matching the default type. For the
// object array case, if no type check is needed then either the
// dst type is exactly the expected type and the src type is a
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
if (basic_type != T_OBJECT) {
__ cmp_klass_bne(dst, tmp, t0, t1, halt);
__ cmp_klass_beq(src, tmp, t0, t1, known_ok);
} else {
__ cmp_klass_beq(dst, tmp, t0, t1, known_ok);
__ beq(src, dst, known_ok);
}
__ bind(halt);
__ stop("incorrect type information in arraycopy");
__ bind(known_ok);
}
#endif
#ifndef PRODUCT


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,6 @@
void arraycopy_type_check(Register src, Register src_pos, Register length,
Register dst, Register dst_pos, Register tmp,
CodeStub *stub, BasicType basic_type, int flags);
void arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags);
void arraycopy_prepare_params(Register src, Register src_pos, Register length,
Register dst, Register dst_pos, BasicType basic_type);
void arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length,


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -55,20 +55,6 @@ const Register SHIFT_count = x10; // where count for shift operations must be
#define __ _masm->
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
Register &tmp2) {
if (tmp1 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp1 = extra;
} else if (tmp2 == preserve) {
assert_different_registers(tmp1, tmp2, extra);
tmp2 = extra;
}
assert_different_registers(preserve, tmp1, tmp2);
}
static void select_different_registers(Register preserve,
Register extra,
Register &tmp1,
@ -1155,12 +1141,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
assert_different_registers(obj, k_RInfo, klass_RInfo);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -1073,9 +1073,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
@ -1094,9 +1092,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
tmp3 = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
@ -1173,4 +1169,5 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
__ volatile_load_mem_reg(address, result, info);
__ membar_acquire();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -92,12 +92,8 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// This assumes that all prototype bits fit in an int32_t
mv(tmp1, checked_cast<int32_t>(markWord::prototype().value()));
sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));
if (UseCompressedClassPointers) { // Take care not to kill klass
encode_klass_not_null(tmp1, klass, tmp2);
sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
}
encode_klass_not_null(tmp1, klass, tmp2);
sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
}
if (len->is_valid()) {
@ -108,7 +104,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
// Clear gap/first 4 bytes following the length field.
sw(zr, Address(obj, base_offset));
}
} else if (UseCompressedClassPointers && !UseCompactObjectHeaders) {
} else if (!UseCompactObjectHeaders) {
store_klass_gap(obj, zr);
}
}
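initialize_header() now always stores a compressed klass, so the layout it writes is the compressed-class-pointer one. Summarized as a plain struct (64-bit, !UseCompactObjectHeaders; a reference sketch of the offsets used above, not code from the patch):

#include <cstdint>

struct ObjectHeaderSketch {
  uint64_t mark;           // offset 0:  markWord::prototype() stored by sd
  uint32_t narrow_klass;   // offset 8:  sw of encode_klass_not_null(klass)
  uint32_t gap_or_length;  // offset 12: store_klass_gap(zr), or the array length
};
static_assert(sizeof(ObjectHeaderSketch) == 16, "two-word header");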

Some files were not shown because too many files have changed in this diff.