Merge branch 'master' into JDK-8373026-vector-algorithms

This commit is contained in:
Emanuel Peter 2025-12-16 08:54:38 +01:00
commit 40c51e8fa5
884 changed files with 35832 additions and 17774 deletions

View File

@ -59,7 +59,7 @@ on:
jobs:
build-linux:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
container:
image: alpine:3.20

View File

@ -48,7 +48,7 @@ on:
jobs:
build-cross-compile:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false

View File

@ -75,7 +75,7 @@ on:
jobs:
build-linux:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false
@ -115,9 +115,21 @@ jobs:
if [[ '${{ inputs.apt-architecture }}' != '' ]]; then
sudo dpkg --add-architecture ${{ inputs.apt-architecture }}
fi
sudo apt-get update
sudo apt-get install --only-upgrade apt
sudo apt-get install gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} libxrandr-dev${{ steps.arch.outputs.suffix }} libxtst-dev${{ steps.arch.outputs.suffix }} libcups2-dev${{ steps.arch.outputs.suffix }} libasound2-dev${{ steps.arch.outputs.suffix }} ${{ inputs.apt-extra-packages }}
sudo apt update
sudo apt install --only-upgrade apt
sudo apt install \
gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
libasound2-dev${{ steps.arch.outputs.suffix }} \
libcups2-dev${{ steps.arch.outputs.suffix }} \
libfontconfig1-dev${{ steps.arch.outputs.suffix }} \
libx11-dev${{ steps.arch.outputs.suffix }} \
libxext-dev${{ steps.arch.outputs.suffix }} \
libxrandr-dev${{ steps.arch.outputs.suffix }} \
libxrender-dev${{ steps.arch.outputs.suffix }} \
libxt-dev${{ steps.arch.outputs.suffix }} \
libxtst-dev${{ steps.arch.outputs.suffix }} \
${{ inputs.apt-extra-packages }}
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }}
- name: 'Configure'

View File

@ -57,7 +57,7 @@ jobs:
prepare:
name: 'Prepare the run'
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
env:
# List of platforms to exclude by default
EXCLUDED_PLATFORMS: 'alpine-linux-x64'
@ -405,7 +405,7 @@ jobs:
with:
platform: linux-x64
bootjdk-platform: linux-x64
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
debug-suffix: -debug
@ -419,7 +419,7 @@ jobs:
with:
platform: linux-x64
bootjdk-platform: linux-x64
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
static-suffix: "-static"

View File

@ -1,7 +1,7 @@
[general]
project=jdk
jbs=JDK
version=26
version=27
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright

View File

@ -1,7 +1,7 @@
# Welcome to the JDK!
For build instructions please see the
[online documentation](https://openjdk.org/groups/build/doc/building.html),
[online documentation](https://git.openjdk.org/jdk/blob/master/doc/building.md),
or either of these files:
- [doc/building.html](doc/building.html) (html version)

View File

@ -38,7 +38,7 @@
# directory.
# - open a terminal program and run these commands:
# cd "${JDK_CHECKOUT}"/src/jdk.compiler/share/data/symbols
# bash ../../../../../make/scripts/generate-symbol-data.sh "${JDK_N_INSTALL}"
# bash ../../../../../bin/generate-symbol-data.sh "${JDK_N_INSTALL}"
# - this command will generate or update data for "--release N" into the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols
# directory, updating all registration necessary. If the goal was to update the data, and there are no
# new or changed files in the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols directory after running this script,

View File

@ -541,6 +541,11 @@ href="#apple-xcode">Apple Xcode</a> on some strategies to deal with
this.</p>
<p>It is recommended that you use at least macOS 14 and Xcode 15.4, but
earlier versions may also work.</p>
<p>Starting with Xcode 26, introduced in macOS 26, the Metal toolchain
no longer comes bundled with Xcode, so it needs to be installed
separately. This can either be done via the Xcode's Settings/Components
UI, or in the command line calling
<code>xcodebuild -downloadComponent metalToolchain</code>.</p>
<p>The standard macOS environment contains the basic tooling needed to
build, but for external libraries a package manager is recommended. The
JDK uses <a href="https://brew.sh/">homebrew</a> in the examples, but

View File

@ -352,6 +352,11 @@ on some strategies to deal with this.
It is recommended that you use at least macOS 14 and Xcode 15.4, but
earlier versions may also work.
Starting with Xcode 26, introduced in macOS 26, the Metal toolchain no longer
comes bundled with Xcode, so it needs to be installed separately. This can
either be done via the Xcode's Settings/Components UI, or in the command line
calling `xcodebuild -downloadComponent metalToolchain`.
The standard macOS environment contains the basic tooling needed to build, but
for external libraries a package manager is recommended. The JDK uses
[homebrew](https://brew.sh/) in the examples, but feel free to use whatever

View File

@ -1037,8 +1037,8 @@ running destructors at exit can lead to problems.</p>
<p>Some of the approaches used in HotSpot to avoid dynamic
initialization include:</p>
<ul>
<li><p>Use the <code>Deferred&lt;T&gt;</code> class template. Add a call
to its initialization function at an appropriate place during VM
<li><p>Use the <code>DeferredStatic&lt;T&gt;</code> class template. Add
a call to its initialization function at an appropriate place during VM
initialization. The underlying object is never destroyed.</p></li>
<li><p>For objects of class type, use a variable whose value is a
pointer to the class, initialized to <code>nullptr</code>. Provide an

View File

@ -954,7 +954,7 @@ destructors at exit can lead to problems.
Some of the approaches used in HotSpot to avoid dynamic initialization
include:
* Use the `Deferred<T>` class template. Add a call to its initialization
* Use the `DeferredStatic<T>` class template. Add a call to its initialization
function at an appropriate place during VM initialization. The underlying
object is never destroyed.

View File

@ -119,6 +119,9 @@ cover the new source version</li>
and
<code>test/langtools/tools/javac/preview/classReaderTest/Client.preview.out</code>:
update expected messages for preview errors and warnings</li>
<li><code>test/langtools/tools/javac/versions/Versions.java</code>: add
new source version to the set of valid sources and add new enum constant
for the new class file version.</li>
</ul>
</body>
</html>

View File

@ -65,4 +65,4 @@ to be updated for a particular release.
* `test/langtools/tools/javac/lib/JavacTestingAbstractProcessor.java`
update annotation processor extended by `javac` tests to cover the new source version
* `test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out` and `test/langtools/tools/javac/preview/classReaderTest/Client.preview.out`: update expected messages for preview errors and warnings
* `test/langtools/tools/javac/versions/Versions.java`: add new source version to the set of valid sources and add new enum constant for the new class file version.

View File

@ -125,13 +125,6 @@ define SetupBundleFileBody
&& $(TAR) cf - -$(TAR_INCLUDE_PARAM) $$($1_$$d_LIST_FILE) \
$(TAR_IGNORE_EXIT_VALUE) ) \
| ( $(CD) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) && $(TAR) xf - )$$(NEWLINE) )
# Rename stripped pdb files
ifeq ($(call isTargetOs, windows)+$(SHIP_DEBUG_SYMBOLS), true+public)
for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.stripped.pdb"`; do \
$(ECHO) Renaming $$$${f} to $$$${f%stripped.pdb}pdb $(LOG_INFO); \
$(MV) $$$${f} $$$${f%stripped.pdb}pdb; \
done
endif
# Unzip any zipped debuginfo files
ifeq ($$($1_UNZIP_DEBUGINFO), true)
for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.diz"`; do \
@ -222,14 +215,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JDK_SYMBOLS_EXCLUDE_PATTERN := %.pdb
else
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
JDK_SYMBOLS_EXCLUDE_PATTERN := \
$(filter-out \
%.stripped.pdb, \
$(filter %.pdb, $(ALL_JDK_FILES)) \
)
endif
endif
endif
@ -244,10 +229,7 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
)
JDK_SYMBOLS_BUNDLE_FILES := \
$(filter-out \
%.stripped.pdb, \
$(call FindFiles, $(SYMBOLS_IMAGE_DIR)) \
)
$(call FindFiles, $(SYMBOLS_IMAGE_DIR))
TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
$(ALL_JDK_DEMOS_FILES))
@ -267,14 +249,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JRE_SYMBOLS_EXCLUDE_PATTERN := %.pdb
else
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
JRE_SYMBOLS_EXCLUDE_PATTERN := \
$(filter-out \
%.stripped.pdb, \
$(filter %.pdb, $(ALL_JRE_FILES)) \
)
endif
endif
endif

View File

@ -282,29 +282,33 @@ else
endif
CMDS_TARGET_SUBDIR := bin
# Param 1 - either JDK or JRE
# Copy debug info files into symbols bundle.
# In case of Windows and --with-external-symbols-in-bundles=public, take care to remove *.stripped.pdb files
SetupCopyDebuginfo = \
$(foreach m, $(ALL_$1_MODULES), \
$(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_libs/$m)) \
$(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
$(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
) \
$(eval $(call SetupCopyFiles, COPY_$1_LIBS_DEBUGINFO_$m, \
SRC := $(SUPPORT_OUTPUTDIR)/modules_libs/$m, \
DEST := $($1_IMAGE_DIR)/$(LIBS_TARGET_SUBDIR), \
FILES := $(call FindDebuginfoFiles, \
$(SUPPORT_OUTPUTDIR)/modules_libs/$m), \
FILES := $(dbgfiles), \
)) \
$(eval $1_TARGETS += $$(COPY_$1_LIBS_DEBUGINFO_$m)) \
$(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_cmds/$m)) \
$(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
$(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
) \
$(eval $(call SetupCopyFiles, COPY_$1_CMDS_DEBUGINFO_$m, \
SRC := $(SUPPORT_OUTPUTDIR)/modules_cmds/$m, \
DEST := $($1_IMAGE_DIR)/$(CMDS_TARGET_SUBDIR), \
FILES := $(call FindDebuginfoFiles, \
$(SUPPORT_OUTPUTDIR)/modules_cmds/$m), \
FILES := $(dbgfiles), \
)) \
$(eval $1_TARGETS += $$(COPY_$1_CMDS_DEBUGINFO_$m)) \
)
# No space before argument to avoid having to put $(strip ) everywhere in
# implementation above.
$(call SetupCopyDebuginfo,JDK)
$(call SetupCopyDebuginfo,JRE)
# No space before argument to avoid having to put $(strip ) everywhere in implementation above.
$(call SetupCopyDebuginfo,SYMBOLS)
################################################################################

View File

@ -873,7 +873,7 @@ define SetupRunJtregTestBody
$1_JTREG_BASIC_OPTIONS += -testThreadFactoryPath:$$(JTREG_TEST_THREAD_FACTORY_JAR)
$1_JTREG_BASIC_OPTIONS += -testThreadFactory:$$(JTREG_TEST_THREAD_FACTORY)
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
))
endif
@ -881,8 +881,8 @@ define SetupRunJtregTestBody
AGENT := $$(LIBRARY_PREFIX)JvmtiStressAgent$$(SHARED_LIBRARY_SUFFIX)=$$(JTREG_JVMTI_STRESS_AGENT)
$1_JTREG_BASIC_OPTIONS += -javaoption:'-agentpath:$(TEST_IMAGE_DIR)/hotspot/jtreg/native/$$(AGENT)'
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
))
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
))
endif
@ -1092,7 +1092,7 @@ define SetupRunJtregTestBody
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
$$($1_TEST_TMP_DIR))
$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
$$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
$$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
)
$1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt
@ -1102,11 +1102,11 @@ define SetupRunJtregTestBody
$$(call LogWarn, Test report is stored in $$(strip \
$$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
# Read jtreg documentation to learn on the test stats categories:
# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
# In jtreg, "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
# At the same time these tests contribute to "passed:" tests.
# In here we don't want that and so we substract number of "skipped:" from "passed:".
# Read jtreg documentation to learn on the test stats categories:
# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
# In jtreg, "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
# At the same time these tests contribute to "passed:" tests.
# In here we don't want that and so we substract number of "skipped:" from "passed:".
$$(if $$(wildcard $$($1_RESULT_FILE)), \
$$(eval $1_PASSED_AND_RUNTIME_SKIPPED := $$(shell $$(AWK) '{ gsub(/[,;]/, ""); \

View File

@ -79,7 +79,7 @@ TOOL_GENERATEEXTRAPROPERTIES = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_too
build.tools.generateextraproperties.GenerateExtraProperties
TOOL_GENERATECASEFOLDING = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.generatecharacter.CaseFolding
build.tools.generatecharacter.GenerateCaseFolding
TOOL_MAKEZIPREPRODUCIBLE = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.makezipreproducible.MakeZipReproducible

View File

@ -353,7 +353,12 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[set up toolchain on Mac OS using a path to an Xcode installation])])
UTIL_DEPRECATED_ARG_WITH(sys-root)
UTIL_DEPRECATED_ARG_WITH(tools-dir)
AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
[Point to a nonstandard Visual Studio installation location on Windows by
specifying any existing directory 2 or 3 levels below the installation
root.])]
)
if test "x$with_xcode_path" != x; then
if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then

View File

@ -34,7 +34,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS],
FLAGS_SETUP_LDFLAGS_CPU_DEP([TARGET])
# Setup the build toolchain
FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_])
FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_], [BUILD_])
AC_SUBST(ADLC_LDFLAGS)
])
@ -52,11 +52,6 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# add --no-as-needed to disable default --as-needed link flag on some GCC toolchains
# add --icf=all (Identical Code Folding — merges identical functions)
BASIC_LDFLAGS="-Wl,-z,defs -Wl,-z,relro -Wl,-z,now -Wl,--no-as-needed -Wl,--exclude-libs,ALL"
if test "x$LINKER_TYPE" = "xgold"; then
if test x$DEBUG_LEVEL = xrelease; then
BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--icf=all"
fi
fi
# Linux : remove unused code+data in link step
if test "x$ENABLE_LINKTIME_GC" = xtrue; then
@ -68,7 +63,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
fi
BASIC_LDFLAGS_JVM_ONLY=""
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing $DEBUG_PREFIX_CFLAGS"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
@ -76,7 +71,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
-fPIC"
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing $DEBUG_PREFIX_CFLAGS"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
if test "x$OPENJDK_TARGET_OS" = xlinux; then
@ -108,6 +103,9 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# Setup OS-dependent LDFLAGS
if test "x$OPENJDK_TARGET_OS" = xmacosx && test "x$TOOLCHAIN_TYPE" = xclang; then
if test x$DEBUG_LEVEL = xrelease; then
BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,-dead_strip"
fi
# FIXME: We should really generalize SetSharedLibraryOrigin instead.
OS_LDFLAGS_JVM_ONLY="-Wl,-rpath,@loader_path/. -Wl,-rpath,@loader_path/.."
OS_LDFLAGS="-mmacosx-version-min=$MACOSX_VERSION_MIN -Wl,-reproducible"
@ -166,7 +164,8 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
################################################################################
# $1 - Either BUILD or TARGET to pick the correct OS/CPU variables to check
# conditionals against.
# $2 - Optional prefix for each variable defined.
# $2 - Optional prefix for each variable defined (OPENJDK_BUILD_ or nothing).
# $3 - Optional prefix for toolchain variables (BUILD_ or nothing).
AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
[
# Setup CPU-dependent basic LDFLAGS. These can differ between the target and
@ -200,6 +199,12 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
fi
fi
if test "x${$3LD_TYPE}" = "xgold"; then
if test x$DEBUG_LEVEL = xrelease; then
$1_CPU_LDFLAGS="${$1_CPU_LDFLAGS} -Wl,--icf=all"
fi
fi
# Export variables according to old definitions, prefix with $2 if present.
LDFLAGS_JDK_COMMON="$BASIC_LDFLAGS $BASIC_LDFLAGS_JDK_ONLY \
$OS_LDFLAGS $DEBUGLEVEL_LDFLAGS_JDK_ONLY ${$2EXTRA_LDFLAGS}"

View File

@ -516,7 +516,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_LD_VERSION],
if [ [[ "$LINKER_VERSION_STRING" == *gold* ]] ]; then
[ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
$SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*) .*/\1/'` ]
LINKER_TYPE=gold
$1_TYPE=gold
else
[ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
$SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*/\1/'` ]

View File

@ -229,6 +229,14 @@ define SetupLinkerFlags
# TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
ifeq ($$($1_LINK_TIME_OPTIMIZATION), true)
$1_EXTRA_LDFLAGS += $(LDFLAGS_LTO)
# Instruct the ld64 linker not to delete the temporary object file
# generated during Link Time Optimization
ifeq ($(call isTargetOs, macosx), true)
$1_EXTRA_LDFLAGS += -Wl,-object_path_lto,$$($1_OBJECT_DIR)/$$($1_NAME)_lto_helper.o
endif
ifeq ($(TOOLCHAIN_TYPE), microsoft)
$1_EXTRA_LDFLAGS += -LTCGOUT:$$($1_OBJECT_DIR)/$$($1_NAME).iobj
endif
endif
$1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \

View File

@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
server: "jpg",
product: "jcov",
version: "3.0",
build_number: "3",
file: "bundles/jcov-3.0+3.zip",
build_number: "5",
file: "bundles/jcov-3.0+5.zip",
environment_name: "JCOV_HOME",
},

View File

@ -26,17 +26,17 @@
# Default version, product, and vendor information to use,
# unless overridden by configure
DEFAULT_VERSION_FEATURE=26
DEFAULT_VERSION_FEATURE=27
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2026-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_DATE=2026-09-15
DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26"
DEFAULT_JDK_SOURCE_TARGET_VERSION=26
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27"
DEFAULT_JDK_SOURCE_TARGET_VERSION=27
DEFAULT_PROMOTED_VERSION_PRE=ea

View File

@ -170,6 +170,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ifeq ($(HOTSPOT_TARGET_CPU_ARCH), aarch64)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_vector.ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_atomic.ad \
)))
endif

View File

@ -151,6 +151,12 @@ JVM_STRIPFLAGS ?= $(STRIPFLAGS)
# This source set is reused so save in cache.
$(call FillFindCache, $(JVM_SRC_DIRS))
ifeq ($(SHIP_DEBUG_SYMBOLS), full)
CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_FULL
else ifeq ($(SHIP_DEBUG_SYMBOLS), public)
CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_PUBLIC
endif
ifeq ($(call isTargetOs, windows), true)
ifeq ($(STATIC_LIBS), true)
WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/static-win-exports.def
@ -158,10 +164,6 @@ ifeq ($(call isTargetOs, windows), true)
WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/win-exports.def
endif
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
CFLAGS_STRIPPED_DEBUGINFO := -DHAS_STRIPPED_DEBUGINFO
endif
JVM_LDFLAGS += -def:$(WIN_EXPORT_FILE)
endif
@ -187,7 +189,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
CFLAGS := $(JVM_CFLAGS), \
abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
whitebox.cpp_CXXFLAGS := $(CFLAGS_STRIPPED_DEBUGINFO), \
whitebox.cpp_CXXFLAGS := $(CFLAGS_SHIP_DEBUGINFO), \
DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
DISABLED_WARNINGS_gcc_ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp := nonnull, \
DISABLED_WARNINGS_gcc_bytecodeInterpreter.cpp := unused-label, \

View File

@ -1,73 +0,0 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package build.tools.generatecharacter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class CaseFolding {
public static void main(String[] args) throws Throwable {
if (args.length != 3) {
System.err.println("Usage: java CaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
System.exit(1);
}
var templateFile = Paths.get(args[0]);
var caseFoldingTxt = Paths.get(args[1]);
var genSrcFile = Paths.get(args[2]);
var supportedTypes = "^.*; [CTS]; .*$";
var caseFoldingEntries = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
.map(line -> {
String[] cols = line.split("; ");
return new String[] {cols[0], cols[1], cols[2]};
})
.filter(cols -> {
// the folding case doesn't map back to the original char.
var cp1 = Integer.parseInt(cols[0], 16);
var cp2 = Integer.parseInt(cols[2], 16);
return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
})
.map(cols -> String.format(" entry(0x%s, 0x%s)", cols[0], cols[2]))
.collect(Collectors.joining(",\n", "", ""));
// hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
// 0049; T; 0131; # LATIN CAPITAL LETTER I
final String T_0x0131_0x49 = String.format(" entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
// Generate .java file
Files.write(
genSrcFile,
Files.lines(templateFile)
.map(line -> line.contains("%%%Entries") ? T_0x0131_0x49 + caseFoldingEntries : line)
.collect(Collectors.toList()),
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
}

View File

@ -0,0 +1,134 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package build.tools.generatecharacter;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class GenerateCaseFolding {
public static void main(String[] args) throws Throwable {
if (args.length != 3) {
System.err.println("Usage: java GenerateCaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
System.exit(1);
}
var templateFile = Paths.get(args[0]);
var caseFoldingTxt = Paths.get(args[1]);
var genSrcFile = Paths.get(args[2]);
// java.lang
var supportedTypes = "^.*; [CF]; .*$"; // full/1:M case folding
String[][] caseFoldings = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
.map(line -> {
var fields = line.split("; ");
var cp = fields[0];
fields = fields[2].trim().split(" ");
var folding = new String[fields.length + 1];
folding[0] = cp;
System.arraycopy(fields, 0, folding, 1, fields.length);
return folding;
})
.toArray(size -> new String[size][]);
// util.regex
var expandedSupportedTypes = "^.*; [CTS]; .*$";
var expanded_caseFoldingEntries = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(expandedSupportedTypes))
.map(line -> {
String[] cols = line.split("; ");
return new String[]{cols[0], cols[1], cols[2]};
})
.filter(cols -> {
// the folding case doesn't map back to the original char.
var cp1 = Integer.parseInt(cols[0], 16);
var cp2 = Integer.parseInt(cols[2], 16);
return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
})
.map(cols -> String.format(" entry(0x%s, 0x%s)", cols[0], cols[2]))
.collect(Collectors.joining(",\n", "", ""));
// hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
// 0049; T; 0131; # LATIN CAPITAL LETTER I
final String T_0x0131_0x49 = String.format(" entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
Files.write(
genSrcFile,
Files.lines(templateFile)
.map(line -> line.contains("%%%Entries") ? genFoldingEntries(caseFoldings) : line)
.map(line -> line.contains("%%%Expanded_Case_Map_Entries") ? T_0x0131_0x49 + expanded_caseFoldingEntries : line)
.collect(Collectors.toList()),
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
private static long foldingToLong(String[] folding) {
int cp = Integer.parseInt(folding[0], 16);
long value = (long)Integer.parseInt(folding[1], 16);
if (!Character.isSupplementaryCodePoint(cp) && folding.length != 2) {
var shift = 16;
for (int j = 2; j < folding.length; j++) {
value |= (long)Integer.parseInt(folding[j], 16) << shift;
shift <<= 1;
}
value = value | (long) (folding.length - 1) << 48;
}
return value;
}
private static String genFoldingEntries(String[][] foldings) {
StringBuilder sb = new StringBuilder();
sb.append(" private static final int[] CASE_FOLDING_CPS = {\n");
int width = 10;
for (int i = 0; i < foldings.length; i++) {
if (i % width == 0)
sb.append(" ");
sb.append(String.format("0X%s", foldings[i][0]));
if (i < foldings.length - 1)
sb.append(", ");
if (i % width == width - 1 || i == foldings.length - 1)
sb.append("\n");
}
sb.append(" };\n\n");
sb.append(" private static final long[] CASE_FOLDING_VALUES = {\n");
width = 6;
for (int i = 0; i < foldings.length; i++) {
if (i % width == 0)
sb.append(" "); // indent
sb.append(String.format("0x%013xL", foldingToLong(foldings[i])));
if (i < foldings.length - 1)
sb.append(", ");
if (i % width == width - 1 || i == foldings.length - 1) {
sb.append("\n");
}
}
sb.append(" };\n");
return sb.toString();
}
}

View File

@ -120,3 +120,25 @@ $(INTPOLY_GEN_DONE): $(INTPLOY_HEADER) $(BUILD_TOOLS_JDK)
TARGETS += $(INTPOLY_GEN_DONE)
################################################################################
RELEASE_FILE_TEMPLATE := $(TOPDIR)/src/java.base/share/classes/jdk/internal/misc/resources/release.txt.template
RELEASE_FILE_TARGET := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jdk/internal/misc/resources/release.txt
RELEASE_FILE_VARDEPS := $(COMPANY_NAME) $(VERSION_STRING) $(VERSION_DATE)
RELEASE_FILE_VARDEPS_FILE := $(call DependOnVariable, RELEASE_FILE_VARDEPS, \
$(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jlink_release_txt.vardeps)
$(eval $(call SetupTextFileProcessing, BUILD_RELEASE_FILE, \
SOURCE_FILES := $(RELEASE_FILE_TEMPLATE), \
OUTPUT_FILE := $(RELEASE_FILE_TARGET), \
REPLACEMENTS := \
@@COMPANY_NAME@@ => $(COMPANY_NAME) ; \
@@VERSION_STRING@@ => $(VERSION_STRING) ; \
@@VERSION_DATE@@ => $(VERSION_DATE) , \
))
$(BUILD_RELEASE_FILE): $(RELEASE_FILE_VARDEPS_FILE)
TARGETS += $(BUILD_RELEASE_FILE)
################################################################################

View File

@ -34,7 +34,7 @@
DOCLINT += -Xdoclint:all/protected \
'-Xdoclint/package:java.*,javax.*'
JAVAC_FLAGS += -XDstringConcat=inline
COPY += .icu .dat .spp .nrm content-types.properties \
COPY += .icu .dat .spp .nrm .txt content-types.properties \
hijrah-config-Hijrah-umalqura_islamic-umalqura.properties
CLEAN += intrinsic.properties

View File

@ -72,5 +72,22 @@ TARGETS += $(GENSRC_CHARACTERDATA)
################################################################################
GENSRC_STRINGCASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/lang/CaseFolding.java
STRINGCASEFOLDING_TEMPLATE := $(MODULE_SRC)/share/classes/jdk/internal/lang/CaseFolding.java.template
CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
$(GENSRC_STRINGCASEFOLDING): $(BUILD_TOOLS_JDK) $(STRINGCASEFOLDING_TEMPLATE) $(CASEFOLDINGTXT)
$(call LogInfo, Generating $@)
$(call MakeTargetDir)
$(TOOL_GENERATECASEFOLDING) \
$(STRINGCASEFOLDING_TEMPLATE) \
$(CASEFOLDINGTXT) \
$(GENSRC_STRINGCASEFOLDING)
TARGETS += $(GENSRC_STRINGCASEFOLDING)
endif # include guard
include MakeIncludeEnd.gmk

View File

@ -50,22 +50,5 @@ TARGETS += $(GENSRC_INDICCONJUNCTBREAK)
################################################################################
GENSRC_CASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/util/regex/CaseFolding.java
CASEFOLDINGTEMP := $(MODULE_SRC)/share/classes/jdk/internal/util/regex/CaseFolding.java.template
CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
$(GENSRC_CASEFOLDING): $(BUILD_TOOLS_JDK) $(CASEFOLDINGTEMP) $(CASEFOLDINGTXT)
$(call LogInfo, Generating $@)
$(call MakeTargetDir)
$(TOOL_GENERATECASEFOLDING) \
$(CASEFOLDINGTEMP) \
$(CASEFOLDINGTXT) \
$(GENSRC_CASEFOLDING)
TARGETS += $(GENSRC_CASEFOLDING)
################################################################################
endif # include guard
include MakeIncludeEnd.gmk

View File

@ -164,6 +164,24 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
ifeq ($(USE_EXTERNAL_LIBPNG), false)
LIBSPLASHSCREEN_HEADER_DIRS += libsplashscreen/libpng
LIBSPLASHSCREEN_CFLAGS += -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0
-DPNG_ARM_NEON_IMPLEMENTATION=0 -DPNG_LOONGARCH_LSX_OPT=0
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
# The libpng bundled with jdk is a reduced version which does not
# contain .png_init_filter_functions_vsx.
# Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
# it to 0. If this define is not set, it would be automatically set to 2,
# because
# "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
# expands to true. This would results in the fact that
# .png_init_filter_functions_vsx is needed in libpng.
ifeq ($(call isTargetOs, aix), true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
else
LIBSPLASHSCREEN_EXCLUDES += libpng
endif
@ -176,25 +194,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
LIBSPLASHSCREEN_STATIC_LIB_EXCLUDE_OBJS += $(LIBZIP_OBJS)
endif
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE \
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0 \
-DPNG_LOONGARCH_LSX_OPT=0
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
# The external libpng submitted in the jdk is a reduced version
# which does not contain .png_init_filter_functions_vsx.
# Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
# it to 0. If this define is not set, it would be automatically set to 2,
# because
# "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
# expands to true. This would results in the fact that
# .png_init_filter_functions_vsx is needed in libpng.
ifeq ($(call isTargetOs, aix), true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN
ifeq ($(call isTargetOs, macosx), true)
# libsplashscreen on macosx does not use the unix code

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,909 @@
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// BEGIN This file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from aarch64_atomic_ad.m4
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_weak $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_weak $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_weak $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_weak $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_weak $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_weak $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq_weak $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq_weak $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq_weak $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq_weak $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq_weak $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq_weak $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct getAndSetI(indirect mem, iRegI newval, iRegINoSp oldval) %{
match(Set oldval (GetAndSetI mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchgw $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetL(indirect mem, iRegL newval, iRegLNoSp oldval) %{
match(Set oldval (GetAndSetL mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchg $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetN(indirect mem, iRegN newval, iRegNNoSp oldval) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetN mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchgw $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetP(indirect mem, iRegP newval, iRegPNoSp oldval) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetP mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchg $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetIAcq(indirect mem, iRegI newval, iRegINoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set oldval (GetAndSetI mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetLAcq(indirect mem, iRegL newval, iRegLNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set oldval (GetAndSetL mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetNAcq(indirect mem, iRegN newval, iRegNNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetN mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set oldval (GetAndSetP mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddINoRes(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addI noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqNoRes(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIConst(indirect mem, iRegINoSp newval, immIAddSub incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqConst(indirect mem, iRegINoSp newval, immIAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddINoResConst(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addI noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqNoResConst(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddL(indirect mem, iRegLNoSp newval, iRegL incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLNoRes(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addL noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqNoRes(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLConst(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqConst(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLNoResConst(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addL noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqNoResConst(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}

View File

@ -0,0 +1,246 @@
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// BEGIN This file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from aarch64_atomic_ad.m4
dnl Return Arg1 with two spaces before it. We need this because m4
dnl strips leading spaces from macro args.
dnl Used below to keep optional predicate(...) lines indented inside
dnl the generated instruct bodies.
define(`INDENT', ` $1')dnl
dnl
dnl
dnl
dnl ====================== CompareAndExchange*
dnl
dnl CAE_INSN1(opsuffix, regclass, type, memsize, fmtsuffix, extend, acq)
dnl CompareAndExchange instruct for sub-word types (used for B and S):
dnl after the cmpxchg, the result is extended with $6 (sxtbw/sxthw).
dnl $7 = Acq selects the acquiring variant: adds the
dnl needs_acquiring_load_exclusive predicate, the _acq format suffix,
dnl and halves the cost (VOLATILE_REF_COST instead of 2*).
define(`CAE_INSN1',
`
instruct compareAndExchange$1$7(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($7,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($7,Acq,,2*)VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($7,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($7,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
__ $6($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl CAE_INSN2(opsuffix, regclass, type, memsize, fmtsuffix, acq)
dnl CompareAndExchange instruct for word-or-wider types (I, L, N, P);
dnl no sign extension of the result. For P and N the predicate also
dnl requires barrier_data() == 0 so GC-barriered accesses fall through
dnl to the barrier-aware rules. $6 = Acq selects the acquiring variant
dnl (predicate, _acq format suffix, halved cost).
define(`CAE_INSN2',
`
instruct compareAndExchange$1$6(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
dnl Instantiate CompareAndExchange for every type: plain variants first,
dnl then the acquiring (Acq) variants.
CAE_INSN1(B, I, byte, byte, b, sxtbw, )
CAE_INSN1(S, I, short, halfword, s, sxthw, )
CAE_INSN2(I, I, int, word, w, , )
CAE_INSN2(L, L, long, xword, , , )
CAE_INSN2(N, N, narrow oop, word, w, , )
CAE_INSN2(P, P, ptr, xword, , , )
dnl
CAE_INSN1(B, I, byte, byte, b, sxtbw, Acq)
CAE_INSN1(S, I, short, halfword, s, sxthw, Acq)
CAE_INSN2(I, I, int, word, w, Acq)
CAE_INSN2(L, L, long, xword, , Acq)
CAE_INSN2(N, N, narrow oop, word, w, Acq)
CAE_INSN2(P, P, ptr, xword, , Acq)
dnl
dnl
dnl
dnl ====================== (Weak)CompareAndSwap*
dnl
dnl CAS_INSN1(opsuffix, regclass, type, memsize, fmtsuffix, acq, weak)
dnl (Weak)CompareAndSwap instruct for sub-word types (B and S): the
dnl boolean success flag is materialized with csetw. $6 = Acq selects
dnl the acquiring variant; $7 = Weak selects the weak CAS node and
dnl passes /*weak*/ true to cmpxchg.
define(`CAS_INSN1',
`
instruct ifelse($7,Weak,'weakCompare`,'compare`)AndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res ($7CompareAndSwap$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,)`'ifelse($7,Weak,_weak) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ ifelse($7,Weak,true,false), noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
dnl CAS_INSN2(opsuffix, regclass, type, memsize, fmtsuffix, acq, weak)
dnl (Weak)CompareAndSwap instruct for word-or-wider types (I, L, N, P).
dnl For P and N the predicate additionally requires barrier_data() == 0
dnl so GC-barriered accesses are matched by barrier-aware rules instead.
dnl $6 = Acq selects the acquiring variant; $7 = Weak selects the weak
dnl CAS node and passes /*weak*/ true to cmpxchg.
define(`CAS_INSN2',
`
instruct ifelse($7,Weak,'weakCompare`,'compare`)AndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res ($7CompareAndSwap$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,)`'ifelse($7,Weak,_weak) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ ifelse($7,Weak,true,false), noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
dnl Instantiate CompareAndSwap for every type, in four groups:
dnl strong, strong-acquiring, weak, weak-acquiring.
CAS_INSN1(B, I, byte, byte, b, , )
CAS_INSN1(S, I, short, halfword, s, , )
CAS_INSN2(I, I, int, word, w, , )
CAS_INSN2(L, L, long, xword, , , )
CAS_INSN2(N, N, narrow oop, word, w, , )
CAS_INSN2(P, P, ptr, xword, , , )
dnl
CAS_INSN1(B, I, byte, byte, b, Acq, )
CAS_INSN1(S, I, short, halfword, s, Acq, )
CAS_INSN2(I, I, int, word, w, Acq, )
CAS_INSN2(L, L, long, xword, , Acq, )
CAS_INSN2(N, N, narrow oop, word, w, Acq, )
CAS_INSN2(P, P, ptr, xword, , Acq, )
dnl
CAS_INSN1(B, I, byte, byte, b, , Weak)
CAS_INSN1(S, I, short, halfword, s, , Weak)
CAS_INSN2(I, I, int, word, w, , Weak)
CAS_INSN2(L, L, long, xword, , , Weak)
CAS_INSN2(N, N, narrow oop, word, w, , Weak)
CAS_INSN2(P, P, ptr, xword, , , Weak)
dnl
CAS_INSN1(B, I, byte, byte, b, Acq, Weak)
CAS_INSN1(S, I, short, halfword, s, Acq, Weak)
CAS_INSN2(I, I, int, word, w, Acq, Weak)
CAS_INSN2(L, L, long, xword, , Acq, Weak)
CAS_INSN2(N, N, narrow oop, word, w, Acq, Weak)
CAS_INSN2(P, P, ptr, xword, , Acq, Weak)
dnl
dnl
dnl
dnl ====================== GetAndSet*
dnl
dnl GAS_INSN1(opsuffix, sizesuffix, acq)
dnl GetAndSet (atomic exchange) instruct. $2 is the w suffix for
dnl word-sized types (I, N) and empty for xword (L, P). For P and N
dnl the predicate requires barrier_data() == 0. $3 = Acq selects the
dnl acquiring variant, which emits atomic_xchgal* instead of
dnl atomic_xchg* and halves the cost.
define(`GAS_INSN1',
`
instruct getAndSet$1$3(indirect mem, iReg$1 newval, iReg$1NoSp oldval) %{
ifelse($1$3,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$3,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$3,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set oldval (GetAndSet$1 mem newval));
ins_cost(`'ifelse($3,Acq,,2*)VOLATILE_REF_COST);
format %{ "atomic_xchg$2`'ifelse($3,Acq,_acq) $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg`'ifelse($3,Acq,al)$2($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}')dnl
dnl
dnl Instantiate GetAndSet for I/L/N/P: plain, then acquiring.
GAS_INSN1(I, w, )
GAS_INSN1(L, , )
GAS_INSN1(N, w, )
GAS_INSN1(P, , )
dnl
GAS_INSN1(I, w, Acq)
GAS_INSN1(L, , Acq)
GAS_INSN1(N, w, Acq)
GAS_INSN1(P, , Acq)
dnl
dnl
dnl
dnl ====================== GetAndAdd*
dnl
dnl GAA_INSN1(opsuffix, incrclass, sizesuffix, acq, nores, const)
dnl GetAndAdd instruct generator covering all eight variants per type:
dnl $4 = Acq adds the acquiring predicate and emits atomic_addal;
dnl $5 = NoRes matches only when the result is unused (destination is
dnl noreg, second operand is "Universe dummy");
dnl $6 = Const takes an add/sub-encodable immediate increment instead
dnl of a register ($2 is the increment register class otherwise).
dnl Cost: acquiring variants cost half, result-producing variants +1.
define(`GAA_INSN1',
`
instruct getAndAdd$1$4$5$6(indirect mem, `'ifelse($5,NoRes,Universe dummy,iReg$1NoSp newval), `'ifelse($6,Const,imm$1AddSub incr,iReg$2 incr)) %{
ifelse($4$5,AcqNoRes,INDENT(predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));),
$5,NoRes,INDENT(predicate(n->as_LoadStore()->result_not_used());),
$4,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set ifelse($5,NoRes,dummy,newval) (GetAndAdd$1 mem incr));
ins_cost(`'ifelse($4,Acq,,2*)VOLATILE_REF_COST`'ifelse($5,NoRes,,+1));
format %{ "get_and_add$1`'ifelse($4,Acq,_acq) `'ifelse($5,NoRes,noreg,$newval), [$mem], $incr" %}
ins_encode %{
__ atomic_add`'ifelse($4,Acq,al)$3(`'ifelse($5,NoRes,noreg,$newval$$Register), `'ifelse($6,Const,$incr$$constant,$incr$$Register), as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}')dnl
dnl
dnl
dnl Instantiate GetAndAdd for int and long: for each type, all
dnl combinations of {plain, Acq} x {result-used, NoRes} x
dnl {register, Const increment}.
GAA_INSN1(I, IorL2I, w, , , )
GAA_INSN1(I, IorL2I, w, Acq, , )
GAA_INSN1(I, IorL2I, w, , NoRes, )
GAA_INSN1(I, IorL2I, w, Acq, NoRes, )
GAA_INSN1(I, I, w, , , Const)
GAA_INSN1(I, I, w, Acq, , Const)
GAA_INSN1(I, I, w, , NoRes, Const)
GAA_INSN1(I, I, w, Acq, NoRes, Const)
dnl
GAA_INSN1(L, L, , , , )
GAA_INSN1(L, L, , Acq, , )
GAA_INSN1(L, L, , , NoRes, )
GAA_INSN1(L, L, , Acq, NoRes, )
GAA_INSN1(L, L, , , , Const)
GAA_INSN1(L, L, , Acq, , Const)
GAA_INSN1(L, L, , , NoRes, Const)
GAA_INSN1(L, L, , Acq, NoRes, Const)
dnl

View File

@ -346,8 +346,14 @@ source %{
}
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
// 1. Only SVE requires partial vector operations.
// 2. The vector size in bytes must be smaller than MaxVectorSize.
// 3. Predicated vectors have a mask input, which guarantees that
// out-of-bounds lanes remain inactive.
int length_in_bytes = vt->length_in_bytes();
if (UseSVE == 0 ||
length_in_bytes == MaxVectorSize ||
node->is_predicated_vector()) {
return false;
}
@ -370,21 +376,22 @@ source %{
return !node->in(1)->is_Con();
case Op_LoadVector:
case Op_StoreVector:
// We use NEON load/store instructions if the vector length is <= 128 bits.
return vt->length_in_bytes() > 16;
case Op_AddReductionVI:
case Op_AddReductionVL:
// We may prefer using NEON instructions rather than SVE partial operations.
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
// For these ops, we prefer using NEON instructions rather than SVE
// predicated instructions for better performance.
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
// instructions rather than SVE partial operations.
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
return vt->element_basic_type() == T_LONG ||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
!VM_Version::use_neon_for_vector(length_in_bytes);
default:
// For other ops whose vector size is smaller than the max vector size, a
// full-sized unpredicated operation does not impact the final vector result.
// For other ops whose vector size is smaller than the max vector
// size, a full-sized unpredicated operation does not impact the
// vector result.
return false;
}
}

View File

@ -336,8 +336,14 @@ source %{
}
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
// 1. Only SVE requires partial vector operations.
// 2. The vector size in bytes must be smaller than MaxVectorSize.
// 3. Predicated vectors have a mask input, which guarantees that
// out-of-bounds lanes remain inactive.
int length_in_bytes = vt->length_in_bytes();
if (UseSVE == 0 ||
length_in_bytes == MaxVectorSize ||
node->is_predicated_vector()) {
return false;
}
@ -360,21 +366,22 @@ source %{
return !node->in(1)->is_Con();
case Op_LoadVector:
case Op_StoreVector:
// We use NEON load/store instructions if the vector length is <= 128 bits.
return vt->length_in_bytes() > 16;
case Op_AddReductionVI:
case Op_AddReductionVL:
// We may prefer using NEON instructions rather than SVE partial operations.
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
// For these ops, we prefer using NEON instructions rather than SVE
// predicated instructions for better performance.
return !VM_Version::use_neon_for_vector(length_in_bytes);
case Op_MinReductionV:
case Op_MaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
// instructions rather than SVE partial operations.
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
// instructions rather than SVE predicated instructions for
// better performance.
return vt->element_basic_type() == T_LONG ||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
!VM_Version::use_neon_for_vector(length_in_bytes);
default:
// For other ops whose vector size is smaller than the max vector size, a
// full-sized unpredicated operation does not impact the final vector result.
// For other ops whose vector size is smaller than the max vector
// size, a full-sized unpredicated operation does not impact the
// vector result.
return false;
}
}

View File

@ -1,161 +0,0 @@
dnl Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 cas.m4 to generate the CAE and wCAS
dnl instructions used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from cas.m4
dnl Return Arg1 with two spaces before it. We need this because m4
dnl strips leading spaces from macro args.
define(`INDENT', ` $1')dnl
dnl
define(`CAS_INSN',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchange$1$6(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN4',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchange$1$7(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($7,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ifelse($7,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($7,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($7,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
__ $6($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
CAS_INSN4(B,I,byte,byte,b,sxtbw)
CAS_INSN4(S,I,short,halfword,s,sxthw)
CAS_INSN(I,I,int,word,w)
CAS_INSN(L,L,long,xword)
CAS_INSN(N,N,narrow oop,word,w)
CAS_INSN(P,P,ptr,xword)
dnl
CAS_INSN4(B,I,byte,byte,b,sxtbw,Acq)
CAS_INSN4(S,I,short,halfword,s,sxthw,Acq)
CAS_INSN(I,I,int,word,w,Acq)
CAS_INSN(L,L,long,xword,,Acq)
CAS_INSN(N,N,narrow oop,word,w,Acq)
CAS_INSN(P,P,ptr,xword,,Acq)
dnl
define(`CAS_INSN2',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN3',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
CAS_INSN2(B,I,byte,byte,b)
CAS_INSN2(S,I,short,halfword,s)
CAS_INSN3(I,I,int,word,w)
CAS_INSN3(L,L,long,xword)
CAS_INSN3(N,N,narrow oop,word,w)
CAS_INSN3(P,P,ptr,xword)
CAS_INSN2(B,I,byte,byte,b,Acq)
CAS_INSN2(S,I,short,halfword,s,Acq)
CAS_INSN3(I,I,int,word,w,Acq)
CAS_INSN3(L,L,long,xword,,Acq)
CAS_INSN3(N,N,narrow oop,word,w,Acq)
CAS_INSN3(P,P,ptr,xword,,Acq)
dnl
// END This section of the file is automatically generated. Do not edit --------------

View File

@ -5379,7 +5379,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
assert(! Universe::heap()->is_in(k), "should not be an oop");
InstructionMark im(this);
RelocationHolder rspec = metadata_Relocation::spec(index);

View File

@ -85,7 +85,7 @@ void Relocation::pd_set_call_destination(address x) {
} else {
MacroAssembler::pd_patch_instruction(addr(), x);
}
assert(pd_call_destination(addr()) == x, "fail in reloc");
guarantee(pd_call_destination(addr()) == x, "fail in reloc");
}
void trampoline_stub_Relocation::pd_fix_owner_after_move() {

View File

@ -2879,7 +2879,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_encryptBlock() {
__ align(CodeEntryAlignment);
@ -2912,7 +2912,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
//
address generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES cryptographic extension support");
@ -2946,7 +2946,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -3051,7 +3051,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -3178,7 +3178,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - counter vector byte array address
// c_rarg4 - input length
// c_rarg5 - saved encryptedCounter start

View File

@ -1795,10 +1795,13 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
return size; // Self copy, no move.
if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ideal_reg()), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
// Memory->Memory Spill.
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (masm) {
__ ld(R0, src_offset, R1_SP);
__ std(R0, dst_offset, R1_SP);
@ -1806,26 +1809,20 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
__ std(R0, dst_offset+8, R1_SP);
}
size += 16;
#ifndef PRODUCT
if (st != nullptr) {
st->print("%-7s [R1_SP + #%d] -> [R1_SP + #%d] \t// vector spill copy", "SPILL", src_offset, dst_offset);
}
#endif // !PRODUCT
}
// VectorRegister->Memory Spill.
else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) {
VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr();
int dst_offset = ra_->reg2offset(dst_lo);
if (PowerArchitecturePPC64 >= 9) {
if (is_aligned(dst_offset, 16)) {
if (masm) {
__ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
}
size += 4;
} else {
// Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
// observed with VectorRearrangeTest.java on Power9).
if (masm) {
__ addi(R0, R1_SP, dst_offset);
__ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
}
size += 8;
if (masm) {
__ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, dst_offset);
@ -1833,24 +1830,25 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
}
size += 8;
}
#ifndef PRODUCT
if (st != nullptr) {
if (PowerArchitecturePPC64 >= 9) {
st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "STXV", Matcher::regName[src_lo], dst_offset);
} else {
st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
"%-7s %s, [R0] \t// vector spill copy", "ADDI", dst_offset, "STXVD2X", Matcher::regName[src_lo]);
}
}
#endif // !PRODUCT
}
// Memory->VectorRegister Spill.
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) {
VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr();
int src_offset = ra_->reg2offset(src_lo);
if (PowerArchitecturePPC64 >= 9) {
if (is_aligned(src_offset, 16)) {
if (masm) {
__ lxv(Rdst, src_offset, R1_SP);
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, src_offset);
__ lxvx(Rdst, R0);
}
size += 8;
if (masm) {
__ lxv(Rdst, src_offset, R1_SP);
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, src_offset);
@ -1858,6 +1856,16 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
}
size += 8;
}
#ifndef PRODUCT
if (st != nullptr) {
if (PowerArchitecturePPC64 >= 9) {
st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "LXV", Matcher::regName[dst_lo], src_offset);
} else {
st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
"%-7s %s, [R0] \t// vector spill copy", "ADDI", src_offset, "LXVD2X", Matcher::regName[dst_lo]);
}
}
#endif // !PRODUCT
}
// VectorRegister->VectorRegister.
else if (src_lo_rc == rc_vec && dst_lo_rc == rc_vec) {
@ -1867,6 +1875,12 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
__ xxlor(Rdst, Rsrc, Rsrc);
}
size += 4;
#ifndef PRODUCT
if (st != nullptr) {
st->print("%-7s %s, %s, %s\t// vector spill copy",
"XXLOR", Matcher::regName[dst_lo], Matcher::regName[src_lo], Matcher::regName[src_lo]);
}
#endif // !PRODUCT
}
else {
ShouldNotReachHere(); // No VR spill.
@ -6321,8 +6335,36 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
// Special prefetch versions which use the dcbz instruction.
// Allocation prefetch via dcbz (data cache block zero) with a register
// offset. Selected only for AllocatePrefetchStyle == 3; dcbz both
// establishes the line and zeroes it ("write-many with zero").
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
// Allocation prefetch via dcbz with no offset operand (plain base
// address). Selected only for AllocatePrefetchStyle == 3.
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@ -6335,6 +6377,7 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}

View File

@ -2956,7 +2956,7 @@ class StubGenerator: public StubCodeGenerator {
// Arguments for generated stub:
// R3_ARG1 - source byte array address
// R4_ARG2 - destination byte array address
// R5_ARG3 - K (key) in little endian int array
// R5_ARG3 - sessionKe (key) in little endian int array
address generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id;

View File

@ -2067,6 +2067,83 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
}
}
// Emit a conditional FP move guarded by an integer comparison:
//   dst <- src  iff  (op1 <cond> op2)
// cmpFlag packs a BoolTest condition plus an unsigned-compare bit
// (unsigned_branch_mask); eq/ne are signedness-independent, so the
// bit only matters for the ordering conditions.
void C2_MacroAssembler::enc_cmove_fp_cmp(int cmpFlag, Register op1, Register op2,
                                         FloatRegister dst, FloatRegister src, bool is_single) {
  const bool treat_unsigned = (cmpFlag & unsigned_branch_mask) == unsigned_branch_mask;
  const int cond = cmpFlag & (~unsigned_branch_mask);
  switch (cond) {
    case BoolTest::eq:
      cmov_fp_eq(op1, op2, dst, src, is_single);
      return;
    case BoolTest::ne:
      cmov_fp_ne(op1, op2, dst, src, is_single);
      return;
    case BoolTest::lt:
      if (treat_unsigned) {
        cmov_fp_ltu(op1, op2, dst, src, is_single);
      } else {
        cmov_fp_lt(op1, op2, dst, src, is_single);
      }
      return;
    case BoolTest::le:
      if (treat_unsigned) {
        cmov_fp_leu(op1, op2, dst, src, is_single);
      } else {
        cmov_fp_le(op1, op2, dst, src, is_single);
      }
      return;
    case BoolTest::gt:
      if (treat_unsigned) {
        cmov_fp_gtu(op1, op2, dst, src, is_single);
      } else {
        cmov_fp_gt(op1, op2, dst, src, is_single);
      }
      return;
    case BoolTest::ge:
      if (treat_unsigned) {
        cmov_fp_geu(op1, op2, dst, src, is_single);
      } else {
        cmov_fp_ge(op1, op2, dst, src, is_single);
      }
      return;
    default:
      assert(false, "unsupported compare condition");
      ShouldNotReachHere();
  }
}
// Emit a conditional FP move guarded by an FP comparison:
//   dst <- src  iff  (op1 <cond> op2)
// cmp_single selects float vs. double for the compared operands;
// cmov_single selects float vs. double for the moved value.
// NOTE(review): the unsigned bit is masked off and never consulted —
// presumably FP compares have no unsigned form; confirm with callers.
void C2_MacroAssembler::enc_cmove_fp_cmp_fp(int cmpFlag,
                                            FloatRegister op1, FloatRegister op2,
                                            FloatRegister dst, FloatRegister src,
                                            bool cmp_single, bool cmov_single) {
  const int cond = cmpFlag & (~unsigned_branch_mask);
  switch (cond) {
    case BoolTest::eq:
      cmov_fp_cmp_fp_eq(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    case BoolTest::ne:
      cmov_fp_cmp_fp_ne(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    case BoolTest::lt:
      cmov_fp_cmp_fp_lt(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    case BoolTest::le:
      cmov_fp_cmp_fp_le(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    case BoolTest::gt:
      cmov_fp_cmp_fp_gt(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    case BoolTest::ge:
      cmov_fp_cmp_fp_ge(op1, op2, dst, src, cmp_single, cmov_single);
      return;
    default:
      assert(false, "unsupported compare condition");
      ShouldNotReachHere();
  }
}
// Set dst to NaN if any NaN input.
void C2_MacroAssembler::minmax_fp(FloatRegister dst, FloatRegister src1, FloatRegister src2,
FLOAT_TYPE ft, bool is_min) {

View File

@ -132,6 +132,13 @@
FloatRegister op1, FloatRegister op2,
Register dst, Register src, bool is_single);
void enc_cmove_fp_cmp(int cmpFlag, Register op1, Register op2,
FloatRegister dst, FloatRegister src, bool is_single);
void enc_cmove_fp_cmp_fp(int cmpFlag, FloatRegister op1, FloatRegister op2,
FloatRegister dst, FloatRegister src,
bool cmp_single, bool cmov_single);
void spill(Register r, bool is64, int offset) {
is64 ? sd(r, Address(sp, offset))
: sw(r, Address(sp, offset));

View File

@ -1233,7 +1233,119 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
bind(no_set);
}
// ----------- cmove, compare float -----------
// ----------- cmove float/double -----------
// Conditional FP move: dst <- src iff (cmp1 == cmp2), implemented by
// branching over the move on the inverse condition (bne).
void MacroAssembler::cmov_fp_eq(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label done;
  bne(cmp1, cmp2, done);  // skip the move when cmp1 != cmp2
  if (!is_single) {
    fmv_d(dst, src);      // double-precision move
  } else {
    fmv_s(dst, src);      // single-precision move
  }
  bind(done);
}
// Move float/double src into dst only when the integer registers cmp1 != cmp2;
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_ne(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  beq(cmp1, cmp2, no_set); // skip the move when cmp1 == cmp2
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 <= cmp2 (signed compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_le(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bgt(cmp1, cmp2, no_set); // skip the move when cmp1 > cmp2
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 <= cmp2 (unsigned compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_leu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bgtu(cmp1, cmp2, no_set); // skip the move when cmp1 > cmp2 (unsigned)
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 >= cmp2 (signed compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_ge(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  blt(cmp1, cmp2, no_set); // skip the move when cmp1 < cmp2
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 >= cmp2 (unsigned compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_geu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bltu(cmp1, cmp2, no_set); // skip the move when cmp1 < cmp2 (unsigned)
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 < cmp2 (signed compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_lt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bge(cmp1, cmp2, no_set); // skip the move when cmp1 >= cmp2
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 < cmp2 (unsigned compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_ltu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bgeu(cmp1, cmp2, no_set); // skip the move when cmp1 >= cmp2 (unsigned)
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 > cmp2 (signed compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_gt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  ble(cmp1, cmp2, no_set); // skip the move when cmp1 <= cmp2
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Move float/double src into dst only when cmp1 > cmp2 (unsigned compare);
// otherwise dst is left unchanged. is_single selects fmv.s vs fmv.d.
void MacroAssembler::cmov_fp_gtu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
  Label no_set;
  bleu(cmp1, cmp2, no_set); // skip the move when cmp1 <= cmp2 (unsigned)
  if (is_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// ----------- cmove, compare float/double -----------
//
// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
// so, just list behaviour of unordered ones as follow.
@ -1391,6 +1503,148 @@ void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// ----------- cmove float/double, compare float/double -----------
// Move src to dst only if cmp1 == cmp2,
// otherwise leave dst unchanged, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 != cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
void MacroAssembler::cmov_fp_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 != cmp2, including the case of NaN
    // not jump (i.e. move src to dst) if cmp1 == cmp2
    float_bne(cmp1, cmp2, no_set);
  } else {
    double_bne(cmp1, cmp2, no_set);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Keep dst unchanged only if cmp1 == cmp2,
// otherwise move src to dst, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 == cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
void MacroAssembler::cmov_fp_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 == cmp2
    // not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
    float_beq(cmp1, cmp2, no_set);
  } else {
    double_beq(cmp1, cmp2, no_set);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 < cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
// scenario 2:
// java code : cmp1 > cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
void MacroAssembler::cmov_fp_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 > cmp2
    // not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
    float_bgt(cmp1, cmp2, no_set);
  } else {
    double_bgt(cmp1, cmp2, no_set);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// When cmp1 >= cmp2 then dst = src; otherwise (cmp1 < cmp2, or either input
// is NaN) dst is left unchanged.
void MacroAssembler::cmov_fp_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 < cmp2 or either is NaN
    // not jump (i.e. move src to dst) if cmp1 >= cmp2
    float_blt(cmp1, cmp2, no_set, false, true);
  } else {
    double_blt(cmp1, cmp2, no_set, false, true);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 <= cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
// scenario 2:
// java code : cmp1 >= cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
void MacroAssembler::cmov_fp_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 >= cmp2
    // not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
    float_bge(cmp1, cmp2, no_set);
  } else {
    double_bge(cmp1, cmp2, no_set);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// When cmp1 > cmp2 then dst = src; otherwise (cmp1 <= cmp2, or either input
// is NaN) dst is left unchanged.
void MacroAssembler::cmov_fp_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2,
                                       FloatRegister dst, FloatRegister src,
                                       bool cmp_single, bool cmov_single) {
  // cmp_single selects a float vs double compare of cmp1/cmp2;
  // cmov_single selects a float (fmv.s) vs double (fmv.d) move of src into dst.
  Label no_set;
  if (cmp_single) {
    // jump if cmp1 <= cmp2 or either is NaN
    // not jump (i.e. move src to dst) if cmp1 > cmp2
    float_ble(cmp1, cmp2, no_set, false, true);
  } else {
    double_ble(cmp1, cmp2, no_set, false, true);
  }
  if (cmov_single) {
    fmv_s(dst, src);
  } else {
    fmv_d(dst, src);
  }
  bind(no_set);
}
// Float compare branch instructions
#define INSN(NAME, FLOATCMP, BRANCH) \
@ -4933,7 +5187,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
assert(!Universe::heap()->is_in(k), "should not be an oop");
narrowKlass nk = CompressedKlassPointers::encode(k);
relocate(metadata_Relocation::spec(index), [&] {

View File

@ -665,6 +665,24 @@ class MacroAssembler: public Assembler {
void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_fp_eq(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_ne(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_le(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_leu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_ge(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_geu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_lt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_ltu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_gt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_gtu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
void cmov_fp_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
void cmov_fp_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
void cmov_fp_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
void cmov_fp_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
void cmov_fp_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
void cmov_fp_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
public:
// We try to follow risc-v asm menomics.
// But as we don't layout a reachable GOT,

View File

@ -1924,8 +1924,6 @@ bool Matcher::match_rule_supported(int opcode) {
case Op_SubHF:
return UseZfh;
case Op_CMoveF:
case Op_CMoveD:
case Op_CMoveP:
case Op_CMoveN:
return false;
@ -10466,6 +10464,286 @@ instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop)
ins_pipe(pipe_class_compare);
%}
// --------- CMoveF ---------
// Move float $src into $dst when the signed int compare ($op1 $cop $op2) holds;
// $dst is left unchanged otherwise.
instruct cmovF_cmpI(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpI\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the unsigned int compare ($op1 $cop $op2) holds.
instruct cmovF_cmpU(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpU\n\t"
  %}
  ins_encode %{
    // unsigned_branch_mask selects the unsigned branch variants in the encoder
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the signed long compare ($op1 $cop $op2) holds.
instruct cmovF_cmpL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpL\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the unsigned long compare ($op1 $cop $op2) holds.
instruct cmovF_cmpUL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpUL\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the float compare ($op1 $cop $op2) holds.
// NaN behaviour per condition is documented on MacroAssembler::cmov_fp_cmp_fp_*.
instruct cmovF_cmpF(fRegF dst, fRegF src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpF\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           true /* cmp_single */, true /* cmov_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the double compare ($op1 $cop $op2) holds.
instruct cmovF_cmpD(fRegF dst, fRegF src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpD\n\t"
  %}
  ins_encode %{
    // double_branch_mask tags the flag as a double compare (cmp_single is false)
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           false /* cmp_single */, true /* cmov_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the compressed-oop compare ($op1 $cop $op2)
// holds; compressed oops compare as unsigned values.
instruct cmovF_cmpN(fRegF dst, fRegF src, iRegN op1, iRegN op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpN\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move float $src into $dst when the pointer compare ($op1 $cop $op2) holds;
// pointers compare as unsigned values.
instruct cmovF_cmpP(fRegF dst, fRegF src, iRegP op1, iRegP op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpP\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// --------- CMoveD ---------
// Move double $src into $dst when the signed int compare ($op1 $cop $op2) holds.
instruct cmovD_cmpI(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpI\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the unsigned int compare ($op1 $cop $op2) holds.
instruct cmovD_cmpU(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpU\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the signed long compare ($op1 $cop $op2) holds.
instruct cmovD_cmpL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpL\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the unsigned long compare ($op1 $cop $op2) holds.
instruct cmovD_cmpUL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpUL\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the float compare ($op1 $cop $op2) holds.
// NaN behaviour per condition is documented on MacroAssembler::cmov_fp_cmp_fp_*.
instruct cmovD_cmpF(fRegD dst, fRegD src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpF\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           true /* cmp_single */, false /* cmov_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the double compare ($op1 $cop $op2) holds.
instruct cmovD_cmpD(fRegD dst, fRegD src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpD\n\t"
  %}
  ins_encode %{
    // double_branch_mask tags the flag as a double compare (cmp_single is false)
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           false /* cmp_single */, false /* cmov_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the compressed-oop compare ($op1 $cop $op2)
// holds; compressed oops compare as unsigned values.
instruct cmovD_cmpN(fRegD dst, fRegD src, iRegN op1, iRegN op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpN\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// Move double $src into $dst when the pointer compare ($op1 $cop $op2) holds;
// pointers compare as unsigned values.
instruct cmovD_cmpP(fRegD dst, fRegD src, iRegP op1, iRegP op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);
  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpP\n\t"
  %}
  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}
  ins_pipe(pipe_class_compare);
%}
// ============================================================================
// Procedure Call/Return Instructions

View File

@ -2463,7 +2463,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_encryptBlock() {
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@ -2493,8 +2493,8 @@ class StubGenerator: public StubCodeGenerator {
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
__ vle32_v(res, from);
__ mv(t2, 52);
__ blt(keylen, t2, L_aes128);
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ bltu(keylen, t2, L_aes128);
__ beq(keylen, t2, L_aes192);
// Else we fallthrough to the biggest case (256-bit key size)
@ -2542,7 +2542,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_decryptBlock() {
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@ -2572,8 +2572,8 @@ class StubGenerator: public StubCodeGenerator {
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
__ vle32_v(res, from);
__ mv(t2, 52);
__ blt(keylen, t2, L_aes128);
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ bltu(keylen, t2, L_aes128);
__ beq(keylen, t2, L_aes192);
// Else we fallthrough to the biggest case (256-bit key size)
@ -2606,6 +2606,401 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Emit the CBC-encrypt loop for one fixed key size.
// round is the number of AES round keys (11/13/15 for AES-128/192/256).
// Emits the stub epilogue (leave/ret) itself, so each key-size path bound by
// the caller is a complete, self-terminating code sequence.
// NOTE(review): the do-while loop shape processes at least one block and
// assumes input_len is a positive multiple of BLOCK_SIZE — presumably
// guaranteed by the Java-side intrinsic; confirm against the caller.
void cipherBlockChaining_encryptAESCrypt(int round, Register from, Register to, Register key,
                                         Register rvec, Register input_len) {
  const Register len = x29;
  VectorRegister working_vregs[] = {
    v1, v2, v3, v4, v5, v6, v7, v8,
    v9, v10, v11, v12, v13, v14, v15
  };
  const unsigned int BLOCK_SIZE = 16;
  __ mv(len, input_len);
  // load init rvec
  __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
  __ vle32_v(v16, rvec);
  generate_aes_loadkeys(key, working_vregs, round);
  Label L_enc_loop;
  __ bind(L_enc_loop);
  // Encrypt from source by block size
  __ vle32_v(v17, from);
  __ addi(from, from, BLOCK_SIZE);
  // CBC chaining: xor the plaintext block with the previous ciphertext (or IV)
  __ vxor_vv(v16, v16, v17);
  generate_aes_encrypt(v16, working_vregs, round);
  __ vse32_v(v16, to);
  __ addi(to, to, BLOCK_SIZE);
  __ subi(len, len, BLOCK_SIZE);
  __ bnez(len, L_enc_loop);
  // save current rvec and return
  __ vse32_v(v16, rvec);
  __ mv(x10, input_len);
  __ leave();
  __ ret();
}
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// x10 - input length
//
// Generate the CBC-encrypt stub: dispatch on key length to a per-key-size
// code path. Each cipherBlockChaining_encryptAESCrypt() call below emits its
// own leave/ret, so the three paths do not fall into one another despite the
// sequential binds.
address generate_cipherBlockChaining_encryptAESCrypt() {
  assert(UseAESIntrinsics, "Must be");
  assert(UseZvkn, "need AES instructions (Zvkned extension) support");
  __ align(CodeEntryAlignment);
  StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id;
  StubCodeMark mark(this, stub_id);
  const Register from = c_rarg0;
  const Register to = c_rarg1;
  const Register key = c_rarg2;
  const Register rvec = c_rarg3;
  const Register input_len = c_rarg4;
  const Register keylen = x28;
  address start = __ pc();
  __ enter();
  Label L_aes128, L_aes192;
  // Compute #rounds for AES based on the length of the key array
  __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  __ mv(t0, 52); // key array length is {11, 13, 15} * 4 = {44, 52, 60} ints
  __ bltu(keylen, t0, L_aes128);
  __ beq(keylen, t0, L_aes192);
  // Else we fallthrough to the biggest case (256-bit key size)
  // Note: the following function performs key += 15*16
  cipherBlockChaining_encryptAESCrypt(15, from, to, key, rvec, input_len);
  // Note: the following function performs key += 11*16
  __ bind(L_aes128);
  cipherBlockChaining_encryptAESCrypt(11, from, to, key, rvec, input_len);
  // Note: the following function performs key += 13*16
  __ bind(L_aes192);
  cipherBlockChaining_encryptAESCrypt(13, from, to, key, rvec, input_len);
  return start;
}
// Emit the CBC-decrypt loop for one fixed key size.
// round is the number of AES round keys (11/13/15 for AES-128/192/256).
// Emits the stub epilogue (leave/ret) itself, mirroring the encrypt helper.
// NOTE(review): like the encrypt path, the loop assumes input_len is a
// positive multiple of BLOCK_SIZE — confirm the intrinsic guarantees this.
void cipherBlockChaining_decryptAESCrypt(int round, Register from, Register to, Register key,
                                         Register rvec, Register input_len) {
  const Register len = x29;
  VectorRegister working_vregs[] = {
    v1, v2, v3, v4, v5, v6, v7, v8,
    v9, v10, v11, v12, v13, v14, v15
  };
  const unsigned int BLOCK_SIZE = 16;
  __ mv(len, input_len);
  // load init rvec
  __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
  __ vle32_v(v16, rvec);
  generate_aes_loadkeys(key, working_vregs, round);
  Label L_dec_loop;
  // Decrypt from source by block size
  __ bind(L_dec_loop);
  __ vle32_v(v17, from);
  __ addi(from, from, BLOCK_SIZE);
  // keep the undecrypted ciphertext: it becomes the chain value for the next block
  __ vmv_v_v(v18, v17);
  generate_aes_decrypt(v17, working_vregs, round);
  // CBC chaining: xor with the previous ciphertext (or IV)
  __ vxor_vv(v17, v17, v16);
  __ vse32_v(v17, to);
  __ vmv_v_v(v16, v18);
  __ addi(to, to, BLOCK_SIZE);
  __ subi(len, len, BLOCK_SIZE);
  __ bnez(len, L_dec_loop);
  // save current rvec and return
  __ vse32_v(v16, rvec);
  __ mv(x10, input_len);
  __ leave();
  __ ret();
}
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
// Output:
// x10 - input length
//
// Generate the CBC-decrypt stub: dispatch on key length to a per-key-size
// code path. Each cipherBlockChaining_decryptAESCrypt() call below emits its
// own leave/ret, so the three paths do not fall into one another despite the
// sequential binds.
address generate_cipherBlockChaining_decryptAESCrypt() {
  assert(UseAESIntrinsics, "Must be");
  assert(UseZvkn, "need AES instructions (Zvkned extension) support");
  __ align(CodeEntryAlignment);
  StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id;
  StubCodeMark mark(this, stub_id);
  const Register from = c_rarg0;
  const Register to = c_rarg1;
  const Register key = c_rarg2;
  const Register rvec = c_rarg3;
  const Register input_len = c_rarg4;
  const Register keylen = x28;
  address start = __ pc();
  __ enter();
  // Dropped the unused L_aes128_loop/L_aes192_loop/L_aes256_loop labels:
  // the per-key-size loops live inside cipherBlockChaining_decryptAESCrypt().
  Label L_aes128, L_aes192;
  // Compute #rounds for AES based on the length of the key array
  __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  __ mv(t0, 52); // key array length is {11, 13, 15} * 4 = {44, 52, 60} ints
  __ bltu(keylen, t0, L_aes128);
  __ beq(keylen, t0, L_aes192);
  // Else we fallthrough to the biggest case (256-bit key size)
  // Note: the following function performs key += 15*16
  cipherBlockChaining_decryptAESCrypt(15, from, to, key, rvec, input_len);
  // Note: the following function performs key += 11*16
  __ bind(L_aes128);
  cipherBlockChaining_decryptAESCrypt(11, from, to, key, rvec, input_len);
  // Note: the following function performs key += 13*16
  __ bind(L_aes192);
  cipherBlockChaining_decryptAESCrypt(13, from, to, key, rvec, input_len);
  return start;
}
// Load a big-endian 128-bit value from memory into counter_hi:counter_lo as
// little-endian halves. The most-significant half sits at offset 0 and the
// least-significant half at offset 8; rev8 (Zbb) byte-swaps each half.
void be_load_counter_128(Register counter_hi, Register counter_lo, Register counter) {
  __ ld(counter_lo, Address(counter, 8)); // Load 128-bits from counter
  __ ld(counter_hi, Address(counter));
  __ rev8(counter_lo, counter_lo);        // Convert big-endian to little-endian
  __ rev8(counter_hi, counter_hi);
}
// Increment a little-endian 128-bit value held in counter_hi:counter_lo by
// one, propagating the carry from the low half into the high half.
void add_counter_128(Register counter_hi, Register counter_lo) {
  assert_different_registers(counter_hi, counter_lo, t0);
  __ addi(counter_lo, counter_lo, 1);
  __ seqz(t0, counter_lo);          // Check for result overflow (wrapped to 0)
  __ add(counter_hi, counter_hi, t0); // Add 1 if overflow otherwise 0
}
// Store the little-endian 128-bit value in counter_hi:counter_lo back to
// memory in big-endian layout (inverse of be_load_counter_128).
void be_store_counter_128(Register counter_hi, Register counter_lo, Register counter) {
  assert_different_registers(counter_hi, counter_lo, t0, t1);
  __ rev8(t0, counter_lo);          // Convert little-endian to big-endian
  __ rev8(t1, counter_hi);
  __ sd(t0, Address(counter, 8));   // Store 128-bits to counter
  __ sd(t1, Address(counter));
}
// Emit the CTR-mode crypt loop for one fixed key size (round = 11/13/15 for
// AES-128/192/256). Consumes leftover keystream bytes from
// saved_encrypted_ctr first, then processes whole 16-byte blocks, then
// byte-steps the tail. Emits the stub epilogue (leave/ret) itself.
void counterMode_AESCrypt(int round, Register in, Register out, Register key, Register counter,
                          Register input_len, Register saved_encrypted_ctr, Register used_ptr) {
  // Algorithm:
  //
  // generate_aes_loadkeys();
  // load_counter_128(counter_hi, counter_lo, counter);
  //
  // L_next:
  //   if (used >= BLOCK_SIZE) goto L_main_loop;
  //
  // L_encrypt_next:
  //   *out = *in ^ saved_encrypted_ctr[used]);
  //   out++; in++; used++; len--;
  //   if (len == 0) goto L_exit;
  //   goto L_next;
  //
  // L_main_loop:
  //   if (len == 0) goto L_exit;
  //   saved_encrypted_ctr = generate_aes_encrypt(counter);
  //
  //   add_counter_128(counter_hi, counter_lo);
  //   be_store_counter_128(counter_hi, counter_lo, counter);
  //   used = 0;
  //
  //   if(len < BLOCK_SIZE) goto L_encrypt_next;
  //
  //   v_in = load_16Byte(in);
  //   v_out = load_16Byte(out);
  //   v_saved_encrypted_ctr = load_16Byte(saved_encrypted_ctr);
  //   v_out = v_in ^ v_saved_encrypted_ctr;
  //   out += BLOCK_SIZE;
  //   in += BLOCK_SIZE;
  //   len -= BLOCK_SIZE;
  //   used = BLOCK_SIZE;
  //   goto L_main_loop;
  //
  //
  // L_exit:
  //   store(used);
  //   result = input_len
  //   return result;
  const Register used = x28;
  const Register len = x29;
  const Register counter_hi = x30;
  const Register counter_lo = x31;
  const Register block_size = t2;
  const unsigned int BLOCK_SIZE = 16;
  __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
  __ lwu(used, Address(used_ptr));
  __ mv(len, input_len);
  __ mv(block_size, BLOCK_SIZE);
  // load keys to working_vregs according to round
  VectorRegister working_vregs[] = {
    v1, v2, v3, v4, v5, v6, v7, v8,
    v9, v10, v11, v12, v13, v14, v15
  };
  generate_aes_loadkeys(key, working_vregs, round);
  // 128-bit big-endian load
  be_load_counter_128(counter_hi, counter_lo, counter);
  Label L_next, L_encrypt_next, L_main_loop, L_exit;
  // Check the last saved_encrypted_ctr used value, we fall through
  // to L_encrypt_next when the used value lower than block_size
  __ bind(L_next);
  __ bgeu(used, block_size, L_main_loop);
  // There is still data left fewer than block_size after L_main_loop
  // or last used, we encrypt them one by one.
  __ bind(L_encrypt_next);
  __ add(t0, saved_encrypted_ctr, used);
  __ lbu(t1, Address(t0));
  __ lbu(t0, Address(in));
  __ xorr(t1, t1, t0);
  __ sb(t1, Address(out));
  __ addi(in, in, 1);
  __ addi(out, out, 1);
  __ addi(used, used, 1);
  __ subi(len, len, 1);
  __ beqz(len, L_exit);
  __ j(L_next);
  // We will calculate the next saved_encrypted_ctr and encrypt the blocks of data
  // one by one until there is less than a full block remaining if len not zero
  __ bind(L_main_loop);
  __ beqz(len, L_exit);
  __ vle32_v(v16, counter);
  // encrypt counter according to round
  generate_aes_encrypt(v16, working_vregs, round);
  __ vse32_v(v16, saved_encrypted_ctr);
  // 128-bit little-endian increment
  add_counter_128(counter_hi, counter_lo);
  // 128-bit big-endian store
  be_store_counter_128(counter_hi, counter_lo, counter);
  __ mv(used, 0);
  // Check if we have a full block_size
  __ bltu(len, block_size, L_encrypt_next);
  // We have one full block to encrypt at least
  __ vle32_v(v17, in);
  __ vxor_vv(v16, v16, v17);
  __ vse32_v(v16, out);
  __ add(out, out, block_size);
  __ add(in, in, block_size);
  __ sub(len, len, block_size);
  __ mv(used, block_size);
  __ j(L_main_loop);
  __ bind(L_exit);
  __ sw(used, Address(used_ptr));
  __ mv(x10, input_len);
  __ leave();
  __ ret();
};
// CTR AES crypt.
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg3 - counter vector byte array address
// c_rarg4 - input length
// c_rarg5 - saved encryptedCounter start
// c_rarg6 - saved used length
//
// Output:
// x10 - input length
//
// Generate the CTR-mode AES stub: bail out early for a zero-length input,
// then dispatch on key length to a per-key-size path. Each
// counterMode_AESCrypt() call below emits its own leave/ret, so the three
// paths (and the L_exit path) do not fall into one another.
address generate_counterMode_AESCrypt() {
  assert(UseAESCTRIntrinsics, "Must be");
  assert(UseZvkn, "need AES instructions (Zvkned extension) support");
  assert(UseZbb, "need basic bit manipulation (Zbb extension) support"); // rev8 in the counter helpers
  __ align(CodeEntryAlignment);
  StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
  StubCodeMark mark(this, stub_id);
  const Register in = c_rarg0;
  const Register out = c_rarg1;
  const Register key = c_rarg2;
  const Register counter = c_rarg3;
  const Register input_len = c_rarg4;
  const Register saved_encrypted_ctr = c_rarg5;
  const Register used_len_ptr = c_rarg6;
  const Register keylen = c_rarg7; // temporary register
  const address start = __ pc();
  __ enter();
  Label L_exit;
  // Zero-length input: skip all work and return 0 via the common exit.
  __ beqz(input_len, L_exit);
  Label L_aes128, L_aes192;
  // Compute #rounds for AES based on the length of the key array
  __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  __ mv(t0, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
  __ bltu(keylen, t0, L_aes128);
  __ beq(keylen, t0, L_aes192);
  // Else we fallthrough to the biggest case (256-bit key size)
  // Note: the following function performs crypt with key += 15*16
  counterMode_AESCrypt(15, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
  // Note: the following function performs crypt with key += 13*16
  __ bind(L_aes192);
  counterMode_AESCrypt(13, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
  // Note: the following function performs crypt with key += 11*16
  __ bind(L_aes128);
  counterMode_AESCrypt(11, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
  __ bind(L_exit);
  __ mv(x10, input_len);
  __ leave();
  __ ret();
  return start;
}
// code for comparing 8 characters of strings with Latin1 and Utf16 encoding
void compare_string_8_x_LU(Register tmpL, Register tmpU,
Register strL, Register strU, Label& DIFF) {
@ -6824,6 +7219,12 @@ static const int64_t right_3_bits = right_n_bits(3);
if (UseAESIntrinsics) {
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
}
if (UseAESCTRIntrinsics) {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
}
if (UsePoly1305Intrinsics) {

View File

@ -434,6 +434,15 @@ void VM_Version::c2_initialize() {
warning("UseAESIntrinsics enabled, but UseAES not, enabling");
UseAES = true;
}
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics) && UseZbb) {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
}
if (UseAESCTRIntrinsics && !UseZbb) {
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZbb support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
} else {
if (UseAES) {
warning("AES instructions are not available on this CPU");
@ -443,11 +452,10 @@ void VM_Version::c2_initialize() {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
if (UseAESCTRIntrinsics) {
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZvkn support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
}
}

View File

@ -1715,6 +1715,8 @@ bool Matcher::match_rule_supported(int opcode) {
switch (opcode) {
case Op_ReverseBytesI:
case Op_ReverseBytesL:
case Op_ReverseBytesS:
case Op_ReverseBytesUS:
return UseByteReverseInstruction;
case Op_PopCountI:
case Op_PopCountL:
@ -11615,6 +11617,38 @@ instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{
// Byte reverse
instruct bytes_reverse_short(iRegI dst, iRegI src) %{
match(Set dst (ReverseBytesS src));
predicate(UseByteReverseInstruction);
ins_cost(2 * DEFAULT_COST);
size(8);
format %{ "LRVR $dst, $src\n\t # byte reverse int"
"SRA $dst, 0x0010\t # right shift by 16, sign extended" %}
ins_encode %{
__ z_lrvr($dst$$Register, $src$$Register);
__ z_sra($dst$$Register, 0x0010);
%}
ins_pipe(pipe_class_dummy);
%}
instruct bytes_reverse_unsigned_short(iRegI dst, iRegI src) %{
match(Set dst (ReverseBytesUS src));
predicate(UseByteReverseInstruction);
ins_cost(2 * DEFAULT_COST);
size(8);
format %{ "LRVR $dst, $src\n\t # byte reverse int"
"SRL $dst, 0x0010\t # right shift by 16, zero extended" %}
ins_encode %{
__ z_lrvr($dst$$Register, $src$$Register);
__ z_srl($dst$$Register, 0x0010);
%}
ins_pipe(pipe_class_dummy);
%}
instruct bytes_reverse_int(iRegI dst, iRegI src) %{
match(Set dst (ReverseBytesI src));
predicate(UseByteReverseInstruction); // See Matcher::match_rule_supported

View File

@ -449,8 +449,8 @@ const int FPUStateSizeInWords = 2688 / wordSize;
// imm8[1:0] = 00 (min) / 01 (max)
//
// [1] https://www.intel.com/content/www/us/en/content-details/856721/intel-advanced-vector-extensions-10-2-intel-avx10-2-architecture-specification.html?wapkw=AVX10
const int AVX10_MINMAX_MAX_COMPARE_SIGN = 0x5;
const int AVX10_MINMAX_MIN_COMPARE_SIGN = 0x4;
const int AVX10_2_MINMAX_MAX_COMPARE_SIGN = 0x5;
const int AVX10_2_MINMAX_MIN_COMPARE_SIGN = 0x4;
// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write

View File

@ -1033,8 +1033,8 @@ void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst,
assert(opc == Op_MinV || opc == Op_MinReductionV ||
opc == Op_MaxV || opc == Op_MaxReductionV, "sanity");
int imm8 = (opc == Op_MinV || opc == Op_MinReductionV) ? AVX10_MINMAX_MIN_COMPARE_SIGN
: AVX10_MINMAX_MAX_COMPARE_SIGN;
int imm8 = (opc == Op_MinV || opc == Op_MinReductionV) ? AVX10_2_MINMAX_MIN_COMPARE_SIGN
: AVX10_2_MINMAX_MAX_COMPARE_SIGN;
if (elem_bt == T_FLOAT) {
evminmaxps(dst, mask, src1, src2, true, imm8, vlen_enc);
} else {
@ -5163,7 +5163,7 @@ void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister ds
}
}
void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
void C2_MacroAssembler::vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
switch(to_elem_bt) {
case T_LONG:
evcvttps2qqs(dst, src, vec_enc);
@ -5183,7 +5183,7 @@ void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister d
}
}
void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
void C2_MacroAssembler::vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
switch(to_elem_bt) {
case T_LONG:
evcvttps2qqs(dst, src, vec_enc);
@ -5203,7 +5203,7 @@ void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister d
}
}
void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
void C2_MacroAssembler::vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
switch(to_elem_bt) {
case T_LONG:
evcvttpd2qqs(dst, src, vec_enc);
@ -5223,7 +5223,7 @@ void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister d
}
}
void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
void C2_MacroAssembler::vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
switch(to_elem_bt) {
case T_LONG:
evcvttpd2qqs(dst, src, vec_enc);

View File

@ -347,13 +347,13 @@ public:
XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5,
AddressLiteral float_sign_flip, Register rscratch, int vec_enc);
void vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
void vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
void vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
void vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
void vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
void vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
void vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
void vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
void vector_cast_double_to_int_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5, Register rscratch,

View File

@ -8899,9 +8899,9 @@ void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XM
case T_LONG:
evpminsq(dst, mask, nds, src, merge, vector_len); break;
case T_FLOAT:
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
case T_DOUBLE:
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
default:
fatal("Unexpected type argument %s", type2name(type)); break;
}
@ -8918,9 +8918,9 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM
case T_LONG:
evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
case T_FLOAT:
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
case T_DOUBLE:
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
default:
fatal("Unexpected type argument %s", type2name(type)); break;
}
@ -8937,9 +8937,9 @@ void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XM
case T_LONG:
evpminsq(dst, mask, nds, src, merge, vector_len); break;
case T_FLOAT:
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
case T_DOUBLE:
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
default:
fatal("Unexpected type argument %s", type2name(type)); break;
}
@ -8956,9 +8956,9 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM
case T_LONG:
evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
case T_FLOAT:
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
case T_DOUBLE:
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
default:
fatal("Unexpected type argument %s", type2name(type)); break;
}

View File

@ -73,7 +73,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(compiler, 109000 WINDOWS_ONLY(+2000)) \
do_arch_blob(compiler, 120000 WINDOWS_ONLY(+2000)) \
do_stub(compiler, vector_float_sign_mask) \
do_arch_entry(x86, compiler, vector_float_sign_mask, \
vector_float_sign_mask, vector_float_sign_mask) \

View File

@ -480,7 +480,7 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - counter vector byte array address
// Linux
// c_rarg4 - input length
@ -1063,7 +1063,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address StubGenerator::generate_aescrypt_encryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@ -1158,7 +1158,7 @@ address StubGenerator::generate_aescrypt_encryptBlock() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
//
address StubGenerator::generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@ -1255,7 +1255,7 @@ address StubGenerator::generate_aescrypt_decryptBlock() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -1407,7 +1407,7 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -3524,10 +3524,10 @@ void StubGenerator::aesgcm_avx512(Register in, Register len, Register ct, Regist
false, true, false, false, false, ghashin_offset, aesout_offset, HashKey_32);
ghash16_avx512(false, true, false, false, true, in, pos, avx512_subkeyHtbl, AAD_HASHx, SHUF_MASK, stack_offset, 16 * 16, 0, HashKey_16);
__ addl(pos, 16 * 16);
__ bind(MESG_BELOW_32_BLKS);
__ subl(len, 16 * 16);
__ addl(pos, 16 * 16);
gcm_enc_dec_last_avx512(len, in, pos, AAD_HASHx, SHUF_MASK, avx512_subkeyHtbl, ghashin_offset, HashKey_16, true, true);
__ bind(GHASH_DONE);
@ -4016,13 +4016,15 @@ void StubGenerator::aesgcm_avx2(Register in, Register len, Register ct, Register
const Register rounds = r10;
const XMMRegister ctr_blockx = xmm9;
const XMMRegister aad_hashx = xmm8;
Label encrypt_done, encrypt_by_8_new, encrypt_by_8;
Label encrypt_done, encrypt_by_8_new, encrypt_by_8, exit;
//This routine should be called only for message sizes of 128 bytes or more.
//Macro flow:
//process 8 16 byte blocks in initial_num_blocks.
//process 8 16 byte blocks at a time until all are done 'encrypt_by_8_new followed by ghash_last_8'
__ xorl(pos, pos);
__ cmpl(len, 128);
__ jcc(Assembler::less, exit);
//Generate 8 constants for htbl
generateHtbl_8_block_avx2(subkeyHtbl);
@ -4090,6 +4092,7 @@ void StubGenerator::aesgcm_avx2(Register in, Register len, Register ct, Register
__ vpxor(xmm0, xmm0, xmm0, Assembler::AVX_128bit);
__ vpxor(xmm13, xmm13, xmm13, Assembler::AVX_128bit);
__ bind(exit);
}
#undef __

View File

@ -3386,6 +3386,11 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
return false;
}
break;
case Op_VectorBlend:
if (UseAVX == 0 && size_in_bits < 128) {
return false;
}
break;
case Op_VectorTest:
if (UseSSE < 4) {
return false; // Implementation limitation
@ -7284,12 +7289,12 @@ instruct loadD(regD dst, memory mem)
%}
// max = java.lang.Math.max(float a, float b)
instruct maxF_avx10_reg(regF dst, regF a, regF b) %{
instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MaxF a b));
format %{ "maxF $dst, $a, $b" %}
ins_encode %{
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MAX_COMPARE_SIGN);
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
@ -7320,12 +7325,12 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRe
%}
// max = java.lang.Math.max(double a, double b)
instruct maxD_avx10_reg(regD dst, regD a, regD b) %{
instruct maxD_reg_avx10_2(regD dst, regD a, regD b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MaxD a b));
format %{ "maxD $dst, $a, $b" %}
ins_encode %{
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MAX_COMPARE_SIGN);
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
@ -7356,12 +7361,12 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRe
%}
// max = java.lang.Math.min(float a, float b)
instruct minF_avx10_reg(regF dst, regF a, regF b) %{
instruct minF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinF a b));
format %{ "minF $dst, $a, $b" %}
ins_encode %{
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MIN_COMPARE_SIGN);
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
@ -7392,12 +7397,12 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRe
%}
// max = java.lang.Math.min(double a, double b)
instruct minD_avx10_reg(regD dst, regD a, regD b) %{
instruct minD_reg_avx10_2(regD dst, regD a, regD b) %{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinD a b));
format %{ "minD $dst, $a, $b" %}
ins_encode %{
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MIN_COMPARE_SIGN);
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
%}
ins_pipe( pipe_slow );
%}
@ -14581,7 +14586,7 @@ instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
ins_pipe(pipe_slow);
%}
instruct convF2I_reg_reg_avx10(rRegI dst, regF src)
instruct convF2I_reg_reg_avx10_2(rRegI dst, regF src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvF2I src));
@ -14592,7 +14597,7 @@ instruct convF2I_reg_reg_avx10(rRegI dst, regF src)
ins_pipe(pipe_slow);
%}
instruct convF2I_reg_mem_avx10(rRegI dst, memory src)
instruct convF2I_reg_mem_avx10_2(rRegI dst, memory src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvF2I (LoadF src)));
@ -14615,7 +14620,7 @@ instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
ins_pipe(pipe_slow);
%}
instruct convF2L_reg_reg_avx10(rRegL dst, regF src)
instruct convF2L_reg_reg_avx10_2(rRegL dst, regF src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvF2L src));
@ -14626,7 +14631,7 @@ instruct convF2L_reg_reg_avx10(rRegL dst, regF src)
ins_pipe(pipe_slow);
%}
instruct convF2L_reg_mem_avx10(rRegL dst, memory src)
instruct convF2L_reg_mem_avx10_2(rRegL dst, memory src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvF2L (LoadF src)));
@ -14649,7 +14654,7 @@ instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
ins_pipe(pipe_slow);
%}
instruct convD2I_reg_reg_avx10(rRegI dst, regD src)
instruct convD2I_reg_reg_avx10_2(rRegI dst, regD src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvD2I src));
@ -14660,7 +14665,7 @@ instruct convD2I_reg_reg_avx10(rRegI dst, regD src)
ins_pipe(pipe_slow);
%}
instruct convD2I_reg_mem_avx10(rRegI dst, memory src)
instruct convD2I_reg_mem_avx10_2(rRegI dst, memory src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvD2I (LoadD src)));
@ -14683,7 +14688,7 @@ instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
ins_pipe(pipe_slow);
%}
instruct convD2L_reg_reg_avx10(rRegL dst, regD src)
instruct convD2L_reg_reg_avx10_2(rRegL dst, regD src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvD2L src));
@ -14694,7 +14699,7 @@ instruct convD2L_reg_reg_avx10(rRegL dst, regD src)
ins_pipe(pipe_slow);
%}
instruct convD2L_reg_mem_avx10(rRegL dst, memory src)
instruct convD2L_reg_mem_avx10_2(rRegL dst, memory src)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (ConvD2L (LoadD src)));
@ -19655,7 +19660,7 @@ instruct minmax_reductionF_av(legRegF dst, legVec src, legVec tmp, legVec atmp,
ins_pipe( pipe_slow );
%}
instruct minmax_reduction2F_avx10(regF dst, immF src1, vec src2, vec xtmp1) %{
instruct minmax_reduction2F_avx10_2(regF dst, immF src1, vec src2, vec xtmp1) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeF::POS_INF) ||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeF::NEG_INF)) &&
@ -19673,7 +19678,7 @@ instruct minmax_reduction2F_avx10(regF dst, immF src1, vec src2, vec xtmp1) %{
ins_pipe( pipe_slow );
%}
instruct minmax_reductionF_avx10(regF dst, immF src1, vec src2, vec xtmp1, vec xtmp2) %{
instruct minmax_reductionF_avx10_2(regF dst, immF src1, vec src2, vec xtmp1, vec xtmp2) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeF::POS_INF) ||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeF::NEG_INF)) &&
@ -19691,7 +19696,7 @@ instruct minmax_reductionF_avx10(regF dst, immF src1, vec src2, vec xtmp1, vec x
ins_pipe( pipe_slow );
%}
instruct minmax_reduction2F_avx10_av(regF dst, vec src, vec xtmp1) %{
instruct minmax_reduction2F_av_avx10_2(regF dst, vec src, vec xtmp1) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
Matcher::vector_length(n->in(2)) == 2);
match(Set dst (MinReductionV dst src));
@ -19707,7 +19712,7 @@ instruct minmax_reduction2F_avx10_av(regF dst, vec src, vec xtmp1) %{
ins_pipe( pipe_slow );
%}
instruct minmax_reductionF_avx10_av(regF dst, vec src, vec xtmp1, vec xtmp2) %{
instruct minmax_reductionF_av_avx10_2(regF dst, vec src, vec xtmp1, vec xtmp2) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
Matcher::vector_length(n->in(2)) >= 4);
match(Set dst (MinReductionV dst src));
@ -19805,7 +19810,7 @@ instruct minmax_reductionD_av(legRegD dst, legVec src, legVec tmp1, legVec tmp2,
ins_pipe( pipe_slow );
%}
instruct minmax_reduction2D_avx10(regD dst, immD src1, vec src2, vec xtmp1) %{
instruct minmax_reduction2D_avx10_2(regD dst, immD src1, vec src2, vec xtmp1) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeD::POS_INF) ||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeD::NEG_INF)) &&
@ -19823,7 +19828,7 @@ instruct minmax_reduction2D_avx10(regD dst, immD src1, vec src2, vec xtmp1) %{
ins_pipe( pipe_slow );
%}
instruct minmax_reductionD_avx10(regD dst, immD src1, vec src2, vec xtmp1, vec xtmp2) %{
instruct minmax_reductionD_avx10_2(regD dst, immD src1, vec src2, vec xtmp1, vec xtmp2) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeD::POS_INF) ||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeD::NEG_INF)) &&
@ -19842,7 +19847,7 @@ instruct minmax_reductionD_avx10(regD dst, immD src1, vec src2, vec xtmp1, vec x
%}
instruct minmax_reduction2D_av_avx10(regD dst, vec src, vec xtmp1) %{
instruct minmax_reduction2D_av_avx10_2(regD dst, vec src, vec xtmp1) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
Matcher::vector_length(n->in(2)) == 2);
match(Set dst (MinReductionV dst src));
@ -19858,7 +19863,7 @@ instruct minmax_reduction2D_av_avx10(regD dst, vec src, vec xtmp1) %{
ins_pipe( pipe_slow );
%}
instruct minmax_reductionD_av_avx10(regD dst, vec src, vec xtmp1, vec xtmp2) %{
instruct minmax_reductionD_av_avx10_2(regD dst, vec src, vec xtmp1, vec xtmp2) %{
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
Matcher::vector_length(n->in(2)) >= 4);
match(Set dst (MinReductionV dst src));
@ -20761,7 +20766,7 @@ instruct vminmaxL_reg_evex(vec dst, vec src1, vec src2) %{
%}
// Float/Double vector Min/Max
instruct minmaxFP_avx10_reg(vec dst, vec a, vec b) %{
instruct minmaxFP_reg_avx10_2(vec dst, vec a, vec b) %{
predicate(VM_Version::supports_avx10_2() &&
is_floating_point_type(Matcher::vector_element_basic_type(n))); // T_FLOAT, T_DOUBLE
match(Set dst (MinV a b));
@ -22108,29 +22113,29 @@ instruct castFtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k
ins_pipe( pipe_slow );
%}
instruct castFtoX_reg_avx10(vec dst, vec src) %{
instruct castFtoX_reg_avx10_2(vec dst, vec src) %{
predicate(VM_Version::supports_avx10_2() &&
is_integral_type(Matcher::vector_element_basic_type(n)));
match(Set dst (VectorCastF2X src));
format %{ "vector_cast_f2x_avx10 $dst, $src\t!" %}
format %{ "vector_cast_f2x_avx10_2 $dst, $src\t!" %}
ins_encode %{
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
int vlen_enc = (to_elem_bt == T_LONG) ? vector_length_encoding(this) : vector_length_encoding(this, $src);
__ vector_castF2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
__ vector_castF2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
instruct castFtoX_mem_avx10(vec dst, memory src) %{
instruct castFtoX_mem_avx10_2(vec dst, memory src) %{
predicate(VM_Version::supports_avx10_2() &&
is_integral_type(Matcher::vector_element_basic_type(n)));
match(Set dst (VectorCastF2X (LoadVector src)));
format %{ "vector_cast_f2x_avx10 $dst, $src\t!" %}
format %{ "vector_cast_f2x_avx10_2 $dst, $src\t!" %}
ins_encode %{
int vlen = Matcher::vector_length(this);
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
int vlen_enc = (to_elem_bt == T_LONG) ? vector_length_encoding(this) : vector_length_encoding(vlen * sizeof(jfloat));
__ vector_castF2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
__ vector_castF2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -22182,29 +22187,29 @@ instruct castDtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k
ins_pipe( pipe_slow );
%}
instruct castDtoX_reg_avx10(vec dst, vec src) %{
instruct castDtoX_reg_avx10_2(vec dst, vec src) %{
predicate(VM_Version::supports_avx10_2() &&
is_integral_type(Matcher::vector_element_basic_type(n)));
match(Set dst (VectorCastD2X src));
format %{ "vector_cast_d2x_avx10 $dst, $src\t!" %}
format %{ "vector_cast_d2x_avx10_2 $dst, $src\t!" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this, $src);
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
__ vector_castD2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
__ vector_castD2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
instruct castDtoX_mem_avx10(vec dst, memory src) %{
instruct castDtoX_mem_avx10_2(vec dst, memory src) %{
predicate(VM_Version::supports_avx10_2() &&
is_integral_type(Matcher::vector_element_basic_type(n)));
match(Set dst (VectorCastD2X (LoadVector src)));
format %{ "vector_cast_d2x_avx10 $dst, $src\t!" %}
format %{ "vector_cast_d2x_avx10_2 $dst, $src\t!" %}
ins_encode %{
int vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(vlen * sizeof(jdouble));
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
__ vector_castD2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
__ vector_castD2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -25176,14 +25181,14 @@ instruct scalar_binOps_HF_reg(regF dst, regF src1, regF src2)
ins_pipe(pipe_slow);
%}
instruct scalar_minmax_HF_avx10_reg(regF dst, regF src1, regF src2)
instruct scalar_minmax_HF_reg_avx10_2(regF dst, regF src1, regF src2)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MaxHF src1 src2));
match(Set dst (MinHF src1 src2));
format %{ "scalar_min_max_fp16 $dst, $src1, $src2" %}
ins_encode %{
int function = this->ideal_Opcode() == Op_MinHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
int function = this->ideal_Opcode() == Op_MinHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ eminmaxsh($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, function);
%}
ins_pipe( pipe_slow );
@ -25291,7 +25296,7 @@ instruct vector_fma_HF_mem(vec dst, memory src1, vec src2)
ins_pipe( pipe_slow );
%}
instruct vector_minmax_HF_avx10_mem(vec dst, vec src1, memory src2)
instruct vector_minmax_HF_mem_avx10_2(vec dst, vec src1, memory src2)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinVHF src1 (VectorReinterpret (LoadVector src2))));
@ -25299,13 +25304,13 @@ instruct vector_minmax_HF_avx10_mem(vec dst, vec src1, memory src2)
format %{ "vector_min_max_fp16_mem $dst, $src1, $src2" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$Address, true, function, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
instruct vector_minmax_HF_avx10_reg(vec dst, vec src1, vec src2)
instruct vector_minmax_HF_reg_avx10_2(vec dst, vec src1, vec src2)
%{
predicate(VM_Version::supports_avx10_2());
match(Set dst (MinVHF src1 src2));
@ -25313,7 +25318,7 @@ instruct vector_minmax_HF_avx10_reg(vec dst, vec src1, vec src2)
format %{ "vector_min_max_fp16 $dst, $src1, $src2" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$XMMRegister, true, function, vlen_enc);
%}
ins_pipe( pipe_slow );

View File

@ -1038,6 +1038,8 @@ static void* dll_load_library(const char *filename, int *eno, char *ebuf, int eb
dflags |= RTLD_MEMBER;
}
Events::log_dll_message(nullptr, "Attempting to load shared library %s", filename);
void* result;
const char* error_report = nullptr;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
@ -2331,8 +2333,8 @@ int os::open(const char *path, int oflag, int mode) {
if (ret != -1) {
if ((st_mode & S_IFMT) == S_IFDIR) {
errno = EISDIR;
::close(fd);
errno = EISDIR;
return -1;
}
} else {

View File

@ -861,6 +861,90 @@ pid_t os::Bsd::gettid() {
}
}
// Returns the uid of a process or -1 on error.
uid_t os::Bsd::get_process_uid(pid_t pid) {
struct kinfo_proc kp;
size_t size = sizeof kp;
int mib_kern[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
if (sysctl(mib_kern, 4, &kp, &size, nullptr, 0) == 0) {
if (size > 0 && kp.kp_proc.p_pid == pid) {
return kp.kp_eproc.e_ucred.cr_uid;
}
}
return (uid_t)-1;
}
// Returns true if the process is running as root.
bool os::Bsd::is_process_root(pid_t pid) {
uid_t uid = get_process_uid(pid);
return (uid != (uid_t)-1) ? os::Posix::is_root(uid) : false;
}
#ifdef __APPLE__
// macOS has a secure per-user temporary directory.
// Root can attach to a non-root process, hence it needs
// to lookup /var/folders for the user specific temporary directory
// of the form /var/folders/*/*/T, that contains PERFDATA_NAME_user
// directory.
static const char VAR_FOLDERS[] = "/var/folders/";
int os::Bsd::get_user_tmp_dir_macos(const char* user, int vmid, char* output_path, int output_size) {
// read the var/folders directory
DIR* varfolders_dir = os::opendir(VAR_FOLDERS);
if (varfolders_dir != nullptr) {
// var/folders directory contains 2-characters subdirectories (buckets)
struct dirent* bucket_de;
// loop until the PERFDATA_NAME_user directory has been found
while ((bucket_de = os::readdir(varfolders_dir)) != nullptr) {
// skip over files and special "." and ".."
if (bucket_de->d_type != DT_DIR || bucket_de->d_name[0] == '.') {
continue;
}
// absolute path to the bucket
char bucket[PATH_MAX];
int b = os::snprintf(bucket, PATH_MAX, "%s%s/", VAR_FOLDERS, bucket_de->d_name);
// the total length of the absolute path must not exceed the buffer size
if (b >= PATH_MAX || b < 0) {
continue;
}
// each bucket contains next level subdirectories
DIR* bucket_dir = os::opendir(bucket);
if (bucket_dir == nullptr) {
continue;
}
// read each subdirectory, skipping over regular files
struct dirent* subbucket_de;
while ((subbucket_de = os::readdir(bucket_dir)) != nullptr) {
if (subbucket_de->d_type != DT_DIR || subbucket_de->d_name[0] == '.') {
continue;
}
// If the PERFDATA_NAME_user directory exists in the T subdirectory,
// this means the subdirectory is the temporary directory of the user.
char perfdata_path[PATH_MAX];
int p = os::snprintf(perfdata_path, PATH_MAX, "%s%s/T/%s_%s/", bucket, subbucket_de->d_name, PERFDATA_NAME, user);
// the total length must not exceed the output buffer size
if (p >= PATH_MAX || p < 0) {
continue;
}
// check if the subdirectory exists
if (os::file_exists(perfdata_path)) {
// the return value of snprintf is not checked for the second time
return os::snprintf(output_path, output_size, "%s%s/T", bucket, subbucket_de->d_name);
}
}
os::closedir(bucket_dir);
}
os::closedir(varfolders_dir);
}
return -1;
}
#endif
intx os::current_thread_id() {
#ifdef __APPLE__
return (intx)os::Bsd::gettid();
@ -1035,6 +1119,8 @@ void *os::Bsd::dlopen_helper(const char *filename, int mode, char *ebuf, int ebu
int rtn = fegetenv(&default_fenv);
assert(rtn == 0, "fegetenv must succeed");
Events::log_dll_message(nullptr, "Attempting to load shared library %s", filename);
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
result = ::dlopen(filename, RTLD_LAZY);
@ -2275,8 +2361,8 @@ int os::open(const char *path, int oflag, int mode) {
if (ret != -1) {
if ((st_mode & S_IFMT) == S_IFDIR) {
errno = EISDIR;
::close(fd);
errno = EISDIR;
return -1;
}
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,12 @@ class os::Bsd {
static pthread_t main_thread(void) { return _main_thread; }
static pid_t gettid();
static uid_t get_process_uid(pid_t pid);
static bool is_process_root(pid_t pid);
#ifdef __APPLE__
static int get_user_tmp_dir_macos(const char* user, int vmid, char* output_buffer, int buffer_size);
#endif
static intptr_t* ucontext_get_sp(const ucontext_t* uc);
static intptr_t* ucontext_get_fp(const ucontext_t* uc);

View File

@ -467,9 +467,9 @@ void CgroupV1MemoryController::print_version_specific_info(outputStream* st, phy
kmem_max_usage.set_value(temp);
}
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes");
OSContainer::print_container_helper(st, kmem_max_usage, "kernel_memory_max_usage_in_bytes");
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit");
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage");
OSContainer::print_container_helper(st, kmem_max_usage, "kernel_memory_max_usage");
}
char* CgroupV1Subsystem::cpu_cpuset_cpus() {

View File

@ -378,8 +378,8 @@ void CgroupV2MemoryController::print_version_specific_info(outputStream* st, phy
if (memory_swap_limit_value(reader(), swap_limit_val)) {
swap_limit.set_value(swap_limit_val);
}
OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes");
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes");
OSContainer::print_container_helper(st, swap_current, "memory_swap_current");
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit");
}
char* CgroupV2Controller::construct_path(char* mount_path, const char* cgroup_path) {

View File

@ -287,20 +287,44 @@ bool OSContainer::pids_current(uint64_t& value) {
return cgroup_subsystem->pids_current(value);
}
template<typename T> struct metric_fmt;
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
template <typename T>
void OSContainer::print_container_metric(outputStream* st, const char* metrics, T value, const char* unit) {
constexpr int max_length = 38; // Longest "metric: value" string ("maximum number of tasks: not supported")
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
if (unit[0] != '\0') {
st->print_cr(" %s", unit);
} else {
st->print_cr("");
}
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
st->print("%s: ", metrics);
if (res.success()) {
if (res.value() != value_unlimited) {
if (res.value() >= 1024) {
st->print_cr(PHYS_MEM_TYPE_FORMAT " k", (physical_memory_size_type)(res.value() / K));
print_container_metric(st, metrics, res.value() / K, "kB");
} else {
st->print_cr(PHYS_MEM_TYPE_FORMAT, res.value());
print_container_metric(st, metrics, res.value(), "B");
}
} else {
st->print_cr("%s", "unlimited");
print_container_metric(st, metrics, "unlimited");
}
} else {
// Not supported
st->print_cr("%s", "unavailable");
print_container_metric(st, metrics, "unavailable");
}
}

View File

@ -65,6 +65,8 @@ class OSContainer: AllStatic {
static void init();
static void print_version_specific_info(outputStream* st);
static void print_container_helper(outputStream* st, MetricResult& res, const char* metrics);
template <typename T>
static void print_container_metric(outputStream* st, const char* metrics, T value, const char* unit = "");
static inline bool is_containerized();
static const char * container_type();

View File

@ -1875,6 +1875,8 @@ void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
assert(rtn == 0, "fegetenv must succeed");
#endif // IA32
Events::log_dll_message(nullptr, "Attempting to load shared library %s", filename);
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
result = ::dlopen(filename, RTLD_LAZY);
@ -2430,62 +2432,57 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("container (cgroup) information:");
const char *p_ct = OSContainer::container_type();
st->print_cr("container_type: %s", p_ct != nullptr ? p_ct : "not supported");
OSContainer::print_container_metric(st, "container_type", p_ct != nullptr ? p_ct : "not supported");
char *p = OSContainer::cpu_cpuset_cpus();
st->print_cr("cpu_cpuset_cpus: %s", p != nullptr ? p : "not supported");
OSContainer::print_container_metric(st, "cpu_cpuset_cpus", p != nullptr ? p : "not supported");
free(p);
p = OSContainer::cpu_cpuset_memory_nodes();
st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported");
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
int i = -1;
bool supported = OSContainer::active_processor_count(i);
st->print("active_processor_count: ");
if (supported) {
assert(i > 0, "must be");
if (ActiveProcessorCount > 0) {
st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
st->print_cr("%d", i);
OSContainer::print_container_metric(st, "active_processor_count", i);
}
} else {
st->print_cr("not supported");
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
supported = OSContainer::cpu_quota(i);
st->print("cpu_quota: ");
if (supported && i > 0) {
st->print_cr("%d", i);
OSContainer::print_container_metric(st, "cpu_quota", i);
} else {
st->print_cr("%s", !supported ? "not supported" : "no quota");
OSContainer::print_container_metric(st, "cpu_quota", !supported ? "not supported" : "no quota");
}
supported = OSContainer::cpu_period(i);
st->print("cpu_period: ");
if (supported && i > 0) {
st->print_cr("%d", i);
OSContainer::print_container_metric(st, "cpu_period", i);
} else {
st->print_cr("%s", !supported ? "not supported" : "no period");
OSContainer::print_container_metric(st, "cpu_period", !supported ? "not supported" : "no period");
}
supported = OSContainer::cpu_shares(i);
st->print("cpu_shares: ");
if (supported && i > 0) {
st->print_cr("%d", i);
OSContainer::print_container_metric(st, "cpu_shares", i);
} else {
st->print_cr("%s", !supported ? "not supported" : "no shares");
OSContainer::print_container_metric(st, "cpu_shares", !supported ? "not supported" : "no shares");
}
uint64_t j = 0;
supported = OSContainer::cpu_usage_in_micros(j);
st->print("cpu_usage_in_micros: ");
if (supported && j > 0) {
st->print_cr(UINT64_FORMAT, j);
OSContainer::print_container_metric(st, "cpu_usage", j, "us");
} else {
st->print_cr("%s", !supported ? "not supported" : "no usage");
OSContainer::print_container_metric(st, "cpu_usage", !supported ? "not supported" : "no usage");
}
MetricResult memory_limit;
@ -2528,31 +2525,29 @@ bool os::Linux::print_container_info(outputStream* st) {
if (OSContainer::cache_usage_in_bytes(val)) {
cache_usage.set_value(val);
}
OSContainer::print_container_helper(st, memory_limit, "memory_limit_in_bytes");
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit_in_bytes");
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit_in_bytes");
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit_in_bytes");
OSContainer::print_container_helper(st, mem_usage, "memory_usage_in_bytes");
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage_in_bytes");
OSContainer::print_container_helper(st, rss_usage, "rss_usage_in_bytes");
OSContainer::print_container_helper(st, cache_usage, "cache_usage_in_bytes");
OSContainer::print_container_helper(st, memory_limit, "memory_limit");
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit");
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit");
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit");
OSContainer::print_container_helper(st, mem_usage, "memory_usage");
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage");
OSContainer::print_container_helper(st, rss_usage, "rss_usage");
OSContainer::print_container_helper(st, cache_usage, "cache_usage");
OSContainer::print_version_specific_info(st);
supported = OSContainer::pids_max(j);
st->print("maximum number of tasks: ");
if (supported && j != value_unlimited) {
st->print_cr(UINT64_FORMAT, j);
OSContainer::print_container_metric(st, "maximum number of tasks", j);
} else {
st->print_cr("%s", !supported ? "not supported" : "unlimited");
OSContainer::print_container_metric(st, "maximum number of tasks", !supported ? "not supported" : "unlimited");
}
supported = OSContainer::pids_current(j);
st->print("current number of tasks: ");
if (supported && j > 0) {
st->print_cr(UINT64_FORMAT, j);
OSContainer::print_container_metric(st, "current number of tasks", j);
} else {
st->print_cr("%s", !supported ? "not supported" : "no current tasks");
OSContainer::print_container_metric(st, "current number of tasks", !supported ? "not supported" : "no tasks");
}
return true;
@ -4296,14 +4291,7 @@ OSReturn os::get_native_priority(const Thread* const thread,
return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}
// This is the fastest way to get thread cpu time on Linux.
// Returns cpu time (user+sys) for any thread, not only for current.
// POSIX compliant clocks are implemented in the kernels 2.6.16+.
// It might work on 2.6.10+ with a special kernel/glibc patch.
// For reference, please, see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification
jlong os::Linux::total_thread_cpu_time(clockid_t clockid) {
jlong os::Linux::thread_cpu_time(clockid_t clockid) {
struct timespec tp;
int status = clock_gettime(clockid, &tp);
assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
@ -4930,8 +4918,8 @@ int os::open(const char *path, int oflag, int mode) {
if (ret != -1) {
if ((st_mode & S_IFMT) == S_IFDIR) {
errno = EISDIR;
::close(fd);
errno = EISDIR;
return -1;
}
} else {
@ -4958,20 +4946,42 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}
// Since kernel v2.6.12 the Linux ABI has had support for encoding the clock
// types in the last three bits. Bit 2 indicates whether a cpu clock refers to a
// thread or a process. Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or
// FD=3. The clock CPUCLOCK_VIRT (0b001) reports the thread's consumed user
// time. POSIX compliant implementations of pthread_getcpuclockid return the
// clock CPUCLOCK_SCHED (0b010) which reports the thread's consumed system+user
// time (as mandated by the POSIX standard POSIX.1-2024/IEEE Std 1003.1-2024
// §3.90).
static bool get_thread_clockid(Thread* thread, clockid_t* clockid, bool total) {
constexpr clockid_t CLOCK_TYPE_MASK = 3;
constexpr clockid_t CPUCLOCK_VIRT = 1;
int rc = pthread_getcpuclockid(thread->osthread()->pthread_id(), clockid);
if (rc != 0) {
// It's possible to encounter a terminated native thread that failed
// to detach itself from the VM - which should result in ESRCH.
assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
return false;
}
if (!total) {
clockid_t clockid_tmp = *clockid;
clockid_tmp = (clockid_tmp & ~CLOCK_TYPE_MASK) | CPUCLOCK_VIRT;
*clockid = clockid_tmp;
}
return true;
}
static jlong user_thread_cpu_time(Thread *thread);
static jlong total_thread_cpu_time(Thread *thread) {
clockid_t clockid;
int rc = pthread_getcpuclockid(thread->osthread()->pthread_id(),
&clockid);
if (rc == 0) {
return os::Linux::total_thread_cpu_time(clockid);
} else {
// It's possible to encounter a terminated native thread that failed
// to detach itself from the VM - which should result in ESRCH.
assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
return -1;
}
clockid_t clockid;
bool success = get_thread_clockid(thread, &clockid, true);
return success ? os::Linux::thread_cpu_time(clockid) : -1;
}
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
@ -4982,7 +4992,7 @@ static jlong total_thread_cpu_time(Thread *thread) {
// the fast estimate available on the platform.
jlong os::current_thread_cpu_time() {
return os::Linux::total_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
return os::Linux::thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
}
jlong os::thread_cpu_time(Thread* thread) {
@ -4991,7 +5001,7 @@ jlong os::thread_cpu_time(Thread* thread) {
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
if (user_sys_cpu_time) {
return os::Linux::total_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
return os::Linux::thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
} else {
return user_thread_cpu_time(Thread::current());
}
@ -5005,46 +5015,11 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
}
}
// -1 on error.
static jlong user_thread_cpu_time(Thread *thread) {
pid_t tid = thread->osthread()->thread_id();
char *s;
char stat[2048];
size_t statlen;
char proc_name[64];
int count;
long sys_time, user_time;
char cdummy;
int idummy;
long ldummy;
FILE *fp;
clockid_t clockid;
bool success = get_thread_clockid(thread, &clockid, false);
os::snprintf_checked(proc_name, 64, "/proc/self/task/%d/stat", tid);
fp = os::fopen(proc_name, "r");
if (fp == nullptr) return -1;
statlen = fread(stat, 1, 2047, fp);
stat[statlen] = '\0';
fclose(fp);
// Skip pid and the command string. Note that we could be dealing with
// weird command names, e.g. user could decide to rename java launcher
// to "java 1.4.2 :)", then the stat file would look like
// 1234 (java 1.4.2 :)) R ... ...
// We don't really need to know the command string, just find the last
// occurrence of ")" and then start parsing from there. See bug 4726580.
s = strrchr(stat, ')');
if (s == nullptr) return -1;
// Skip blank chars
do { s++; } while (s && isspace((unsigned char) *s));
count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
&user_time, &sys_time);
if (count != 13) return -1;
return (jlong)user_time * (1000000000 / os::Posix::clock_tics_per_second());
return success ? os::Linux::thread_cpu_time(clockid) : -1;
}
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {

View File

@ -142,7 +142,7 @@ class os::Linux {
static bool manually_expand_stack(JavaThread * t, address addr);
static void expand_stack_to(address bottom);
static jlong total_thread_cpu_time(clockid_t clockid);
static jlong thread_cpu_time(clockid_t clockid);
static jlong sendfile(int out_fd, int in_fd, jlong* offset, jlong count);

View File

@ -50,7 +50,14 @@ ProcSmapsParser::~ProcSmapsParser() {
bool ProcSmapsParser::read_line() {
_line[0] = '\0';
return ::fgets(_line, _linelen, _f) != nullptr;
if (::fgets(_line, _linelen, _f) == nullptr) {
// On error or EOF, ensure deterministic empty buffer
_line[0] = '\0';
return false;
} else {
return true;
}
}
bool ProcSmapsParser::is_header_line() {
@ -101,8 +108,6 @@ void ProcSmapsParser::scan_additional_line(ProcSmapsInfo& out) {
}
}
// Starts or continues parsing. Returns true on success,
// false on EOF or on error.
bool ProcSmapsParser::parse_next(ProcSmapsInfo& out) {
// Information about a single mapping reaches across several lines.
@ -117,15 +122,13 @@ bool ProcSmapsParser::parse_next(ProcSmapsInfo& out) {
assert(is_header_line(), "Not a header line: \"%s\".", _line);
scan_header_line(out);
// Now read until we encounter the next header line or EOF or an error.
bool ok = false, stop = false;
do {
ok = read_line();
stop = !ok || is_header_line();
if (!stop) {
scan_additional_line(out);
while (true) {
bool ok = read_line();
if (!ok || is_header_line()) {
break; // EOF or next header
}
} while (!stop);
scan_additional_line(out);
}
return ok;
return true; // always return true if a mapping was parsed
}

View File

@ -84,8 +84,7 @@ public:
ProcSmapsParser(FILE* f);
~ProcSmapsParser();
// Starts or continues parsing. Returns true on success,
// false on EOF or on error.
// Starts or continues parsing. Returns true iff a mapping was parsed.
bool parse_next(ProcSmapsInfo& out);
};

View File

@ -1028,6 +1028,7 @@ char* os::realpath(const char* filename, char* outbuf, size_t outbuflen) {
} else {
errno = ENAMETOOLONG;
}
ErrnoPreserver ep;
permit_forbidden_function::free(p); // *not* os::free
} else {
// Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
@ -1351,6 +1352,10 @@ bool os::Posix::is_root(uid_t uid){
return ROOT_UID == uid;
}
bool os::Posix::is_current_user_root(){
return is_root(geteuid());
}
bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
return is_root(uid) || geteuid() == uid;
}

View File

@ -76,6 +76,9 @@ public:
// Returns true if given uid is root.
static bool is_root(uid_t uid);
// Returns true if the current user is root.
static bool is_current_user_root();
// Returns true if given uid is effective or root uid.
static bool matches_effective_uid_or_root(uid_t uid);

View File

@ -40,6 +40,9 @@
#if defined(LINUX)
#include "os_linux.hpp"
#endif
#if defined(BSD)
#include "os_bsd.hpp"
#endif
# include <errno.h>
# include <pwd.h>
@ -142,6 +145,18 @@ static char* get_user_tmp_dir(const char* user, int vmid, int nspid) {
jio_snprintf(buffer, TMP_BUFFER_LEN, "/proc/%d/root%s", vmid, tmpdir);
tmpdir = buffer;
}
#endif
#ifdef __APPLE__
char buffer[PATH_MAX] = {0};
// Check if the current user is root and the target VM is running as non-root.
// Otherwise the output of os::get_temp_directory() is used.
//
if (os::Posix::is_current_user_root() && !os::Bsd::is_process_root(vmid)) {
int path_size = os::Bsd::get_user_tmp_dir_macos(user, vmid, buffer, sizeof buffer);
if (path_size > 0 && (size_t)path_size < sizeof buffer) {
tmpdir = buffer;
}
}
#endif
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
@ -1138,7 +1153,8 @@ static void mmap_attach_shared(int vmid, char** addr, size_t* sizep, TRAPS) {
// for linux, determine if vmid is for a containerized process
int nspid = LINUX_ONLY(os::Linux::get_namespace_pid(vmid)) NOT_LINUX(-1);
const char* luser = get_user_name(vmid, &nspid, CHECK);
const char* luser = NOT_MACOS(get_user_name(vmid, &nspid, CHECK))
MACOS_ONLY(get_user_name(os::Bsd::get_process_uid(vmid)));
if (luser == nullptr) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),

View File

@ -1645,7 +1645,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
// Save and restore errno to avoid confusing native code with EINTR
// after sigsuspend.
int old_errno = errno;
ErrnoPreserver ep;
PosixSignals::unblock_error_signals();
@ -1727,7 +1727,6 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
// ignore
}
errno = old_errno;
}
static int SR_initialize() {

View File

@ -1715,6 +1715,8 @@ static int _print_module(const char* fname, address base_address,
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", name);
Events::log_dll_message(nullptr, "Attempting to load shared library %s", name);
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(name, &result);)
result = LoadLibrary(name);
@ -4780,8 +4782,8 @@ int os::stat(const char *path, struct stat *sbuf) {
path_to_target = get_path_to_target(wide_path);
if (path_to_target == nullptr) {
// it is a symbolic link, but we failed to resolve it
errno = ENOENT;
os::free(wide_path);
errno = ENOENT;
return -1;
}
}
@ -4792,14 +4794,14 @@ int os::stat(const char *path, struct stat *sbuf) {
// if getting attributes failed, GetLastError should be called immediately after that
if (!bret) {
DWORD errcode = ::GetLastError();
log_debug(os)("os::stat() failed to GetFileAttributesExW: GetLastError->%lu.", errcode);
os::free(wide_path);
os::free(path_to_target);
if (errcode == ERROR_FILE_NOT_FOUND || errcode == ERROR_PATH_NOT_FOUND) {
errno = ENOENT;
} else {
errno = 0;
}
log_debug(os)("os::stat() failed to GetFileAttributesExW: GetLastError->%lu.", errcode);
os::free(wide_path);
os::free(path_to_target);
return -1;
}
@ -4998,8 +5000,8 @@ int os::open(const char *path, int oflag, int mode) {
path_to_target = get_path_to_target(wide_path);
if (path_to_target == nullptr) {
// it is a symbolic link, but we failed to resolve it
errno = ENOENT;
os::free(wide_path);
errno = ENOENT;
return -1;
}
}
@ -5273,6 +5275,7 @@ char* os::realpath(const char* filename, char* outbuf, size_t outbuflen) {
} else {
errno = ENAMETOOLONG;
}
ErrnoPreserver ep;
permit_forbidden_function::free(p); // *not* os::free
}
return result;

View File

@ -50,11 +50,9 @@ double SharedRuntime::fmod_winx64(double x, double y)
hx ^= sx; /* |x| */
hy &= 0x7fffffff; /* |y| */
#pragma warning( disable : 4146 )
/* purge off exception values */
if ((hy | ly) == 0 || (hx >= 0x7ff00000) || /* y=0,or x not finite */
((hy | ((ly | -ly) >> 31))>0x7ff00000)) /* or y is NaN */
#pragma warning( default : 4146 )
((hy | ((ly | -ly) >> 31))>0x7ff00000)) /* or y is NaN */
return (x*y) / (x*y);
if (hx <= hy) {
if ((hx<hy) || (lx<ly)) return x; /* |x|<|y| return x */

View File

@ -90,7 +90,7 @@ typedef CodeBuffer::csize_t csize_t; // file-local definition
// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
CodeBuffer::CodeBuffer(const CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
// Provide code buffer with meaningful name
initialize_misc(blob->name());
initialize(blob->content_begin(), blob->content_size());

View File

@ -672,7 +672,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
}
// (2) CodeBuffer referring to pre-allocated CodeBlob.
CodeBuffer(CodeBlob* blob);
CodeBuffer(const CodeBlob* blob);
// (3) code buffer allocating codeBlob memory for code & relocation
// info but with lazy initialization. The name must be something

View File

@ -86,9 +86,9 @@ void AOTMappedHeapWriter::init() {
if (CDSConfig::is_dumping_heap()) {
Universe::heap()->collect(GCCause::_java_lang_system_gc);
_buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new FillersTable();
_fillers = new (mtClassShared) FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;

View File

@ -96,6 +96,7 @@
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "services/management.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/defaultStream.hpp"
@ -2204,7 +2205,7 @@ void AOTMetaspace::initialize_shared_spaces() {
CountSharedSymbols cl;
SymbolTable::shared_symbols_do(&cl);
tty->print_cr("Number of shared symbols: %zu", cl.total());
if (HeapShared::is_loading_mapping_mode()) {
if (HeapShared::is_loading() && HeapShared::is_loading_mapping_mode()) {
tty->print_cr("Number of shared strings: %zu", StringTable::shared_entry_count());
}
tty->print_cr("VM version: %s\r\n", static_mapinfo->vm_version());

View File

@ -184,6 +184,7 @@ static size_t archive_object_size(oopDesc* archive_object) {
oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
assert(!archive_object->is_stackChunk(), "no such objects are archived");
NoJvmtiEventsMark njem;
oop heap_object;
Klass* klass = archive_object->klass();

View File

@ -63,7 +63,7 @@ void AOTThread::initialize() {
// This is important because this thread runs before JVMTI monitors are set up appropriately.
// Therefore, callbacks would not work as intended. JVMTI has no business peeking at how we
// materialize primordial objects from the AOT cache.
thread->toggle_is_disable_suspend();
thread->disable_jvmti_events();
#endif
JavaThread::vm_exit_on_osthread_failure(thread);

View File

@ -357,7 +357,7 @@ InstanceKlass* LambdaProxyClassDictionary::load_and_init_lambda_proxy_class(Inst
InstanceKlass* nest_host = caller_ik->nest_host(THREAD);
assert(nest_host == shared_nest_host, "mismatched nest host");
EventClassLoad class_load_start_event;
EventClassLoad class_load_event;
// Add to class hierarchy, and do possible deoptimizations.
lambda_ik->add_to_hierarchy(THREAD);
@ -368,8 +368,8 @@ InstanceKlass* LambdaProxyClassDictionary::load_and_init_lambda_proxy_class(Inst
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, lambda_ik);
}
if (class_load_start_event.should_commit()) {
SystemDictionary::post_class_load_event(&class_load_start_event, lambda_ik, ClassLoaderData::class_loader_data(class_loader()));
if (class_load_event.should_commit()) {
JFR_ONLY(SystemDictionary::post_class_load_event(&class_load_event, lambda_ik, ClassLoaderData::class_loader_data(class_loader()));)
}
lambda_ik->initialize(CHECK_NULL);

View File

@ -149,6 +149,10 @@ public:
assert(is_loaded(), "must be loaded");
return _flags;
}
// Fetch Klass::access_flags.
jint access_flags() { return flags().as_int(); }
bool has_finalizer() {
assert(is_loaded(), "must be loaded");
return _has_finalizer; }

View File

@ -216,15 +216,6 @@ jint ciKlass::modifier_flags() {
)
}
// ------------------------------------------------------------------
// ciKlass::access_flags
jint ciKlass::access_flags() {
assert(is_loaded(), "not loaded");
GUARDED_VM_ENTRY(
return get_Klass()->access_flags().as_unsigned_short();
)
}
// ------------------------------------------------------------------
// ciKlass::misc_flags
klass_flags_t ciKlass::misc_flags() {

View File

@ -122,9 +122,6 @@ public:
// Fetch modifier flags.
jint modifier_flags();
// Fetch Klass::access_flags.
jint access_flags();
// Fetch Klass::misc_flags.
klass_flags_t misc_flags();

View File

@ -89,9 +89,6 @@
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@ -157,6 +154,8 @@
#define JAVA_26_VERSION 70
#define JAVA_27_VERSION 71
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
@ -5272,8 +5271,6 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
}
}
JFR_ONLY(INIT_ID(ik);)
// If we reach here, all is well.
// Now remove the InstanceKlass* from the _klass_to_deallocate field
// in order for it to not be destroyed in the ClassFileParser destructor.

View File

@ -500,6 +500,8 @@ class ClassFileParser {
InstanceKlass* create_instance_klass(bool cf_changed_in_CFLH, const ClassInstanceInfo& cl_inst_info, TRAPS);
const ClassFileStream& stream() const { return *_stream; }
const ClassFileStream* clone_stream() const;
void set_klass_to_deallocate(InstanceKlass* klass);

View File

@ -439,7 +439,7 @@ class MethodFamily : public ResourceObj {
StreamIndentor si(str, indent * 2);
str->print("Selected method: ");
print_method(str, _selected_target);
Klass* method_holder = _selected_target->method_holder();
InstanceKlass* method_holder = _selected_target->method_holder();
if (!method_holder->is_interface()) {
str->print(" : in superclass");
}

View File

@ -1091,10 +1091,6 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
// Set the modifiers flag.
u2 computed_modifiers = k->compute_modifier_flags();
set_modifiers(mirror(), computed_modifiers);
// Set the raw access_flags, this is used by reflection instead of modifier flags.
// The Java code for array classes gets the access flags from the element type.
assert(!k->is_array_klass() || k->access_flags().as_unsigned_short() == 0, "access flags are not set for arrays");
set_raw_access_flags(mirror(), k->access_flags().as_unsigned_short());
InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
@ -1103,6 +1099,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
// It might also have a component mirror. This mirror must already exist.
if (k->is_array_klass()) {
// The Java code for array classes gets the access flags from the element type.
set_raw_access_flags(mirror(), 0);
if (k->is_typeArray_klass()) {
BasicType type = TypeArrayKlass::cast(k)->element_type();
if (is_scratch) {
@ -1129,6 +1127,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
// and java_mirror in this klass.
} else {
assert(k->is_instance_klass(), "Must be");
// Set the raw access_flags, this is used by reflection instead of modifier flags.
set_raw_access_flags(mirror(), InstanceKlass::cast(k)->access_flags().as_unsigned_short());
initialize_mirror_fields(InstanceKlass::cast(k), mirror, protection_domain, classData, THREAD);
if (HAS_PENDING_EXCEPTION) {
// If any of the fields throws an exception like OOM remove the klass field
@ -1684,8 +1684,8 @@ int java_lang_Thread::_name_offset;
int java_lang_Thread::_contextClassLoader_offset;
int java_lang_Thread::_eetop_offset;
int java_lang_Thread::_jvmti_thread_state_offset;
int java_lang_Thread::_jvmti_VTMS_transition_disable_count_offset;
int java_lang_Thread::_jvmti_is_in_VTMS_transition_offset;
int java_lang_Thread::_vthread_transition_disable_count_offset;
int java_lang_Thread::_is_in_vthread_transition_offset;
int java_lang_Thread::_interrupted_offset;
int java_lang_Thread::_interruptLock_offset;
int java_lang_Thread::_tid_offset;
@ -1745,34 +1745,34 @@ void java_lang_Thread::set_jvmti_thread_state(oop java_thread, JvmtiThreadState*
java_thread->address_field_put(_jvmti_thread_state_offset, (address)state);
}
int java_lang_Thread::VTMS_transition_disable_count(oop java_thread) {
return java_thread->int_field(_jvmti_VTMS_transition_disable_count_offset);
int java_lang_Thread::vthread_transition_disable_count(oop java_thread) {
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
return AtomicAccess::load(addr);
}
void java_lang_Thread::inc_VTMS_transition_disable_count(oop java_thread) {
assert(JvmtiVTMSTransition_lock->owned_by_self(), "Must be locked");
int val = VTMS_transition_disable_count(java_thread);
java_thread->int_field_put(_jvmti_VTMS_transition_disable_count_offset, val + 1);
void java_lang_Thread::inc_vthread_transition_disable_count(oop java_thread) {
assert(VThreadTransition_lock->owned_by_self(), "Must be locked");
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
int val = AtomicAccess::load(addr);
AtomicAccess::store(addr, val + 1);
}
void java_lang_Thread::dec_VTMS_transition_disable_count(oop java_thread) {
assert(JvmtiVTMSTransition_lock->owned_by_self(), "Must be locked");
int val = VTMS_transition_disable_count(java_thread);
assert(val > 0, "VTMS_transition_disable_count should never be negative");
java_thread->int_field_put(_jvmti_VTMS_transition_disable_count_offset, val - 1);
void java_lang_Thread::dec_vthread_transition_disable_count(oop java_thread) {
assert(VThreadTransition_lock->owned_by_self(), "Must be locked");
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
int val = AtomicAccess::load(addr);
AtomicAccess::store(addr, val - 1);
}
bool java_lang_Thread::is_in_VTMS_transition(oop java_thread) {
return java_thread->bool_field_volatile(_jvmti_is_in_VTMS_transition_offset);
bool java_lang_Thread::is_in_vthread_transition(oop java_thread) {
jboolean* addr = java_thread->field_addr<jboolean>(_is_in_vthread_transition_offset);
return AtomicAccess::load(addr);
}
void java_lang_Thread::set_is_in_VTMS_transition(oop java_thread, bool val) {
assert(is_in_VTMS_transition(java_thread) != val, "already %s transition", val ? "inside" : "outside");
java_thread->bool_field_put_volatile(_jvmti_is_in_VTMS_transition_offset, val);
}
int java_lang_Thread::is_in_VTMS_transition_offset() {
return _jvmti_is_in_VTMS_transition_offset;
void java_lang_Thread::set_is_in_vthread_transition(oop java_thread, bool val) {
assert(is_in_vthread_transition(java_thread) != val, "already %s transition", val ? "inside" : "outside");
jboolean* addr = java_thread->field_addr<jboolean>(_is_in_vthread_transition_offset);
AtomicAccess::store(addr, (jboolean)val);
}
void java_lang_Thread::clear_scopedValueBindings(oop java_thread) {

View File

@ -375,8 +375,8 @@ class java_lang_Class : AllStatic {
#define THREAD_INJECTED_FIELDS(macro) \
macro(java_lang_Thread, jvmti_thread_state, intptr_signature, false) \
macro(java_lang_Thread, jvmti_VTMS_transition_disable_count, int_signature, false) \
macro(java_lang_Thread, jvmti_is_in_VTMS_transition, bool_signature, false) \
macro(java_lang_Thread, vthread_transition_disable_count, int_signature, false) \
macro(java_lang_Thread, is_in_vthread_transition, bool_signature, false) \
JFR_ONLY(macro(java_lang_Thread, jfr_epoch, short_signature, false))
class java_lang_Thread : AllStatic {
@ -390,8 +390,8 @@ class java_lang_Thread : AllStatic {
static int _contextClassLoader_offset;
static int _eetop_offset;
static int _jvmti_thread_state_offset;
static int _jvmti_VTMS_transition_disable_count_offset;
static int _jvmti_is_in_VTMS_transition_offset;
static int _vthread_transition_disable_count_offset;
static int _is_in_vthread_transition_offset;
static int _interrupted_offset;
static int _interruptLock_offset;
static int _tid_offset;
@ -444,12 +444,15 @@ class java_lang_Thread : AllStatic {
static JvmtiThreadState* jvmti_thread_state(oop java_thread);
static void set_jvmti_thread_state(oop java_thread, JvmtiThreadState* state);
static int VTMS_transition_disable_count(oop java_thread);
static void inc_VTMS_transition_disable_count(oop java_thread);
static void dec_VTMS_transition_disable_count(oop java_thread);
static bool is_in_VTMS_transition(oop java_thread);
static void set_is_in_VTMS_transition(oop java_thread, bool val);
static int is_in_VTMS_transition_offset();
static int vthread_transition_disable_count(oop java_thread);
static void inc_vthread_transition_disable_count(oop java_thread);
static void dec_vthread_transition_disable_count(oop java_thread);
static int vthread_transition_disable_count_offset() { return _vthread_transition_disable_count_offset; }
static bool is_in_vthread_transition(oop java_thread);
static void set_is_in_vthread_transition(oop java_thread, bool val);
static int is_in_vthread_transition_offset() { return _is_in_vthread_transition_offset; }
// Clear all scoped value bindings on error
static void clear_scopedValueBindings(oop java_thread);

View File

@ -37,7 +37,7 @@
#include "runtime/handles.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrKlassExtension.hpp"
#include "jfr/jfr.hpp"
#endif
@ -99,6 +99,9 @@ InstanceKlass* KlassFactory::check_shared_class_file_load_hook(
new_ik->set_classpath_index(path_index);
}
JFR_ONLY(Jfr::on_klass_creation(new_ik, parser, THREAD);)
return new_ik;
}
}
@ -213,7 +216,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
result->set_cached_class_file(cached_class_file);
}
JFR_ONLY(ON_KLASS_CREATION(result, parser, THREAD);)
JFR_ONLY(Jfr::on_klass_creation(result, parser, THREAD);)
#if INCLUDE_CDS
if (CDSConfig::is_dumping_archive()) {

View File

@ -73,7 +73,7 @@ void ResolutionErrorTable::add_entry(const constantPoolHandle& pool, int cp_inde
ResolutionErrorKey key(pool(), cp_index);
ResolutionErrorEntry *entry = new ResolutionErrorEntry(error, message, cause, cause_msg);
_resolution_error_table->put(key, entry);
_resolution_error_table->put_when_absent(key, entry);
}
// create new nest host error entry
@ -85,7 +85,7 @@ void ResolutionErrorTable::add_entry(const constantPoolHandle& pool, int cp_inde
ResolutionErrorKey key(pool(), cp_index);
ResolutionErrorEntry *entry = new ResolutionErrorEntry(message);
_resolution_error_table->put(key, entry);
_resolution_error_table->put_when_absent(key, entry);
}
// find entry in the table
@ -126,6 +126,13 @@ ResolutionErrorEntry::~ResolutionErrorEntry() {
}
}
void ResolutionErrorEntry::set_nest_host_error(const char* message) {
assert(_nest_host_error == nullptr, "caller should have checked");
assert_lock_strong(SystemDictionary_lock);
_nest_host_error = message;
}
class ResolutionErrorDeleteIterate : StackObj {
ConstantPool* p;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,10 +91,7 @@ class ResolutionErrorEntry : public CHeapObj<mtClass> {
~ResolutionErrorEntry();
// The incoming nest host error message is already in the C-Heap.
void set_nest_host_error(const char* message) {
_nest_host_error = message;
}
void set_nest_host_error(const char* message);
Symbol* error() const { return _error; }
const char* message() const { return _message; }

View File

@ -560,15 +560,6 @@ static InstanceKlass* handle_parallel_loading(JavaThread* current,
return nullptr;
}
void SystemDictionary::post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld) {
assert(event != nullptr, "invariant");
assert(k != nullptr, "invariant");
event->set_loadedClass(k);
event->set_definingClassLoader(k->class_loader_data());
event->set_initiatingClassLoader(init_cld);
event->commit();
}
// SystemDictionary::resolve_instance_class_or_null is the main function for class name resolution.
// After checking if the InstanceKlass already exists, it checks for ClassCircularityError and
// whether the thread must wait for loading in parallel. It eventually calls load_instance_class,
@ -582,7 +573,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
assert(name != nullptr && !Signature::is_array(name) &&
!Signature::has_envelope(name), "invalid class name: %s", name == nullptr ? "nullptr" : name->as_C_string());
EventClassLoad class_load_start_event;
EventClassLoad class_load_event;
HandleMark hm(THREAD);
@ -713,8 +704,8 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
return nullptr;
}
if (class_load_start_event.should_commit()) {
post_class_load_event(&class_load_start_event, loaded_class, loader_data);
if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, loaded_class, loader_data);)
}
// Make sure we have the right class in the dictionary
@ -789,7 +780,7 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
const ClassLoadInfo& cl_info,
TRAPS) {
EventClassLoad class_load_start_event;
EventClassLoad class_load_event;
ClassLoaderData* loader_data;
// - for hidden classes that are not strong: create a new CLD that has a class holder and
@ -819,15 +810,16 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
k->add_to_hierarchy(THREAD);
// But, do not add to dictionary.
if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, k, loader_data);)
}
k->link_class(CHECK_NULL);
// notify jvmti
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, k);
}
if (class_load_start_event.should_commit()) {
post_class_load_event(&class_load_start_event, k, loader_data);
}
return k;
}
@ -1182,6 +1174,8 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
}
#endif
EventClassLoad class_load_event;
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
oop java_mirror = ik->archived_java_mirror();
precond(java_mirror != nullptr);
@ -1203,11 +1197,26 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
update_dictionary(THREAD, ik, loader_data);
}
if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, ik, loader_data);)
}
assert(ik->is_loaded(), "Must be in at least loaded state");
}
#endif // INCLUDE_CDS
#if INCLUDE_JFR
void SystemDictionary::post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld) {
assert(event != nullptr, "invariant");
assert(k != nullptr, "invariant");
event->set_loadedClass(k);
event->set_definingClassLoader(k->class_loader_data());
event->set_initiatingClassLoader(init_cld);
event->commit();
}
#endif // INCLUDE_JFR
InstanceKlass* SystemDictionary::load_instance_class_impl(Symbol* class_name, Handle class_loader, TRAPS) {
if (class_loader.is_null()) {
@ -1380,15 +1389,6 @@ InstanceKlass* SystemDictionary::load_instance_class(Symbol* name,
return loaded_class;
}
static void post_class_define_event(InstanceKlass* k, const ClassLoaderData* def_cld) {
EventClassDefine event;
if (event.should_commit()) {
event.set_definedClass(k);
event.set_definingClassLoader(def_cld);
event.commit();
}
}
void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_loader, TRAPS) {
ClassLoaderData* loader_data = k->class_loader_data();
@ -1440,7 +1440,6 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, k);
}
post_class_define_event(k, loader_data);
}
// Support parallel classloading
@ -1860,18 +1859,28 @@ Symbol* SystemDictionary::find_resolution_error(const constantPoolHandle& pool,
void SystemDictionary::add_nest_host_error(const constantPoolHandle& pool,
int which,
const char* message) {
const stringStream& message) {
{
MutexLocker ml(Thread::current(), SystemDictionary_lock);
ResolutionErrorEntry* entry = ResolutionErrorTable::find_entry(pool, which);
if (entry != nullptr && entry->nest_host_error() == nullptr) {
if (entry == nullptr) {
// Only add a new entry to the resolution error table if one hasn't been found for this
// constant pool index. In this case resolution succeeded but there's an error in this nest host
// that we use the table to record.
assert(pool->resolved_klass_at(which) != nullptr, "klass should be resolved if there is no entry");
ResolutionErrorTable::add_entry(pool, which, message.as_string(true /* on C-heap */));
} else {
// An existing entry means we had a true resolution failure (LinkageError) with our nest host, but we
// still want to add the error message for the higher-level access checks to report. We should
// only reach here under the same error condition, so we can ignore the potential race with setting
// the message. If we see it is already set then we can ignore it.
entry->set_nest_host_error(message);
} else {
ResolutionErrorTable::add_entry(pool, which, message);
// the message.
const char* nhe = entry->nest_host_error();
if (nhe == nullptr) {
entry->set_nest_host_error(message.as_string(true /* on C-heap */));
} else {
DEBUG_ONLY(const char* msg = message.base();)
assert(strcmp(nhe, msg) == 0, "New message %s, differs from original %s", msg, nhe);
}
}
}
}
@ -2168,9 +2177,10 @@ static bool is_always_visible_class(oop mirror) {
return true; // primitive array
}
assert(klass->is_instance_klass(), "%s", klass->external_name());
return klass->is_public() &&
(InstanceKlass::cast(klass)->is_same_class_package(vmClasses::Object_klass()) || // java.lang
InstanceKlass::cast(klass)->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
InstanceKlass* ik = InstanceKlass::cast(klass);
return ik->is_public() &&
(ik->is_same_class_package(vmClasses::Object_klass()) || // java.lang
ik->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
}
// Find or construct the Java mirror (java.lang.Class instance) for

View File

@ -280,7 +280,7 @@ public:
// Record a nest host resolution/validation error
static void add_nest_host_error(const constantPoolHandle& pool, int which,
const char* message);
const stringStream& message);
static const char* find_nest_host_error(const constantPoolHandle& pool, int which);
static void add_to_initiating_loader(JavaThread* current, InstanceKlass* k,
@ -326,11 +326,10 @@ private:
static void restore_archived_method_handle_intrinsics_impl(TRAPS) NOT_CDS_RETURN;
protected:
// Used by AOTLinkedClassBulkLoader, LambdaProxyClassDictionary, and SystemDictionaryShared
// Used by AOTLinkedClassBulkLoader, LambdaProxyClassDictionary, VMClasses and SystemDictionaryShared
static bool add_loader_constraint(Symbol* name, Klass* klass_being_linked, Handle loader1,
Handle loader2);
static void post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld);
static InstanceKlass* load_shared_class(InstanceKlass* ik,
Handle class_loader,
Handle protection_domain,
@ -342,6 +341,9 @@ protected:
static InstanceKlass* find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
InstanceKlass* k, TRAPS);
JFR_ONLY(static void post_class_load_event(EventClassLoad* event,
const InstanceKlass* k,
const ClassLoaderData* init_cld);)
public:
static bool is_system_class_loader(oop class_loader);

View File

@ -35,6 +35,7 @@
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
@ -240,6 +241,8 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
return;
}
EventClassLoad class_load_event;
// add super and interfaces first
InstanceKlass* super = klass->super();
if (super != nullptr && super->class_loader_data() == nullptr) {
@ -261,6 +264,10 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
dictionary->add_klass(THREAD, klass->name(), klass);
klass->add_to_hierarchy(THREAD);
assert(klass->is_loaded(), "Must be in at least loaded state");
if (class_load_event.should_commit()) {
JFR_ONLY(SystemDictionary::post_class_load_event(&class_load_event, klass, loader_data);)
}
}
#endif // INCLUDE_CDS

View File

@ -649,10 +649,10 @@ class methodHandle;
do_intrinsic(_Continuation_unpin, jdk_internal_vm_Continuation, unpin_name, void_method_signature, F_SN) \
\
/* java/lang/VirtualThread */ \
do_intrinsic(_notifyJvmtiVThreadStart, java_lang_VirtualThread, notifyJvmtiStart_name, void_method_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadEnd, java_lang_VirtualThread, notifyJvmtiEnd_name, void_method_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_void_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_void_signature, F_RN) \
do_intrinsic(_vthreadEndFirstTransition, java_lang_VirtualThread, endFirstTransition_name, void_method_signature, F_RN) \
do_intrinsic(_vthreadStartFinalTransition, java_lang_VirtualThread, startFinalTransition_name, void_method_signature, F_RN) \
do_intrinsic(_vthreadStartTransition, java_lang_VirtualThread, startTransition_name, bool_void_signature, F_RN) \
do_intrinsic(_vthreadEndTransition, java_lang_VirtualThread, endTransition_name, bool_void_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadDisableSuspend, java_lang_VirtualThread, notifyJvmtiDisableSuspend_name, bool_void_signature, F_SN) \
\
/* support for UnsafeConstants */ \

View File

@ -395,10 +395,10 @@ class SerializeClosure;
template(run_finalization_name, "runFinalization") \
template(dispatchUncaughtException_name, "dispatchUncaughtException") \
template(loadClass_name, "loadClass") \
template(notifyJvmtiStart_name, "notifyJvmtiStart") \
template(notifyJvmtiEnd_name, "notifyJvmtiEnd") \
template(notifyJvmtiMount_name, "notifyJvmtiMount") \
template(notifyJvmtiUnmount_name, "notifyJvmtiUnmount") \
template(startTransition_name, "startTransition") \
template(endTransition_name, "endTransition") \
template(startFinalTransition_name, "startFinalTransition") \
template(endFirstTransition_name, "endFirstTransition") \
template(notifyJvmtiDisableSuspend_name, "notifyJvmtiDisableSuspend") \
template(doYield_name, "doYield") \
template(enter_name, "enter") \
@ -497,8 +497,8 @@ class SerializeClosure;
template(java_lang_Boolean_signature, "Ljava/lang/Boolean;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
template(jvmti_thread_state_name, "jvmti_thread_state") \
template(jvmti_VTMS_transition_disable_count_name, "jvmti_VTMS_transition_disable_count") \
template(jvmti_is_in_VTMS_transition_name, "jvmti_is_in_VTMS_transition") \
template(vthread_transition_disable_count_name, "vthread_transition_disable_count") \
template(is_in_vthread_transition_name, "is_in_vthread_transition") \
template(module_entry_name, "module_entry") \
template(resolved_references_name, "<resolved_references>") \
template(init_lock_name, "<init_lock>") \

View File

@ -1346,18 +1346,16 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
#if INCLUDE_JVMTI
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
#endif
SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64

View File

@ -227,11 +227,6 @@ void CodeCache::initialize_heaps() {
if (!non_nmethod.set) {
non_nmethod.size += compiler_buffer_size;
// Further down, just before FLAG_SET_ERGO(), all segment sizes are
// aligned down to the next lower multiple of min_size. For large page
// sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
// Therefore, force non_nmethod.size to at least min_size.
non_nmethod.size = MAX2(non_nmethod.size, min_size);
}
if (!profiled.set && !non_profiled.set) {
@ -307,11 +302,10 @@ void CodeCache::initialize_heaps() {
// Note: if large page support is enabled, min_size is at least the large
// page size. This ensures that the code cache is covered by large pages.
non_profiled.size += non_nmethod.size & alignment_mask(min_size);
non_profiled.size += profiled.size & alignment_mask(min_size);
non_nmethod.size = align_down(non_nmethod.size, min_size);
profiled.size = align_down(profiled.size, min_size);
non_profiled.size = align_down(non_profiled.size, min_size);
non_nmethod.size = align_up(non_nmethod.size, min_size);
profiled.size = align_up(profiled.size, min_size);
non_profiled.size = align_up(non_profiled.size, min_size);
cache_size = non_nmethod.size + profiled.size + non_profiled.size;
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);

Some files were not shown because too many files have changed in this diff Show More