Merge branch 'master' into JDK-8328894

EunHyunsu 2025-12-06 21:41:26 +09:00 committed by GitHub
commit e768e07983
1827 changed files with 81048 additions and 37774 deletions


@@ -59,7 +59,7 @@ on:
jobs:
build-linux:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
container:
image: alpine:3.20


@@ -48,7 +48,7 @@ on:
jobs:
build-cross-compile:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false


@@ -75,7 +75,7 @@ on:
jobs:
build-linux:
name: build
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false
@@ -115,9 +115,21 @@ jobs:
if [[ '${{ inputs.apt-architecture }}' != '' ]]; then
sudo dpkg --add-architecture ${{ inputs.apt-architecture }}
fi
sudo apt-get update
sudo apt-get install --only-upgrade apt
sudo apt-get install gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} libxrandr-dev${{ steps.arch.outputs.suffix }} libxtst-dev${{ steps.arch.outputs.suffix }} libcups2-dev${{ steps.arch.outputs.suffix }} libasound2-dev${{ steps.arch.outputs.suffix }} ${{ inputs.apt-extra-packages }}
sudo apt update
sudo apt install --only-upgrade apt
sudo apt install \
gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
libasound2-dev${{ steps.arch.outputs.suffix }} \
libcups2-dev${{ steps.arch.outputs.suffix }} \
libfontconfig1-dev${{ steps.arch.outputs.suffix }} \
libx11-dev${{ steps.arch.outputs.suffix }} \
libxext-dev${{ steps.arch.outputs.suffix }} \
libxrandr-dev${{ steps.arch.outputs.suffix }} \
libxrender-dev${{ steps.arch.outputs.suffix }} \
libxt-dev${{ steps.arch.outputs.suffix }} \
libxtst-dev${{ steps.arch.outputs.suffix }} \
${{ inputs.apt-extra-packages }}
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }}
- name: 'Configure'


@@ -57,7 +57,7 @@ jobs:
prepare:
name: 'Prepare the run'
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
env:
# List of platforms to exclude by default
EXCLUDED_PLATFORMS: 'alpine-linux-x64'
@@ -405,7 +405,7 @@ jobs:
with:
platform: linux-x64
bootjdk-platform: linux-x64
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
debug-suffix: -debug
@@ -419,7 +419,7 @@ jobs:
with:
platform: linux-x64
bootjdk-platform: linux-x64
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
static-suffix: "-static"

.gitignore

@@ -26,3 +26,8 @@ NashornProfile.txt
*.rej
*.orig
test/benchmarks/**/target
/src/hotspot/CMakeLists.txt
/src/hotspot/compile_commands.json
/src/hotspot/cmake-build-debug/
/src/hotspot/.cache/
/src/hotspot/.idea/


@@ -1,7 +1,7 @@
[general]
project=jdk
jbs=JDK
version=26
version=27
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright


@@ -38,7 +38,7 @@
# directory.
# - open a terminal program and run these commands:
# cd "${JDK_CHECKOUT}"/src/jdk.compiler/share/data/symbols
# bash ../../../../../make/scripts/generate-symbol-data.sh "${JDK_N_INSTALL}"
# bash ../../../../../bin/generate-symbol-data.sh "${JDK_N_INSTALL}"
# - this command will generate or update data for "--release N" into the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols
# directory, updating all necessary registrations. If the goal was to update the data, and there are no
# new or changed files in the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols directory after running this script,


@@ -541,6 +541,11 @@ href="#apple-xcode">Apple Xcode</a> on some strategies to deal with
this.</p>
<p>It is recommended that you use at least macOS 14 and Xcode 15.4, but
earlier versions may also work.</p>
<p>Starting with Xcode 26, introduced in macOS 26, the Metal toolchain
no longer comes bundled with Xcode, so it needs to be installed
separately. This can be done either via Xcode's Settings/Components
UI, or on the command line by calling
<code>xcodebuild -downloadComponent metalToolchain</code>.</p>
<p>The standard macOS environment contains the basic tooling needed to
build, but for external libraries a package manager is recommended. The
JDK uses <a href="https://brew.sh/">homebrew</a> in the examples, but
@@ -668,7 +673,7 @@ update.</p>
(Note that this version is often presented as "MSVC 14.28", and reported
by cl.exe as 19.28.) Older versions will not be accepted by
<code>configure</code> and will not work. The maximum accepted version
of Visual Studio is 2022.</p>
of Visual Studio is 2026.</p>
<p>If you have multiple versions of Visual Studio installed,
<code>configure</code> will by default pick the latest. You can request
a specific version to be used by setting


@@ -352,6 +352,11 @@ on some strategies to deal with this.
It is recommended that you use at least macOS 14 and Xcode 15.4, but
earlier versions may also work.
Starting with Xcode 26, introduced in macOS 26, the Metal toolchain no longer
comes bundled with Xcode, so it needs to be installed separately. This can
be done either via Xcode's Settings/Components UI, or on the command line
by calling `xcodebuild -downloadComponent metalToolchain`.
The standard macOS environment contains the basic tooling needed to build, but
for external libraries a package manager is recommended. The JDK uses
[homebrew](https://brew.sh/) in the examples, but feel free to use whatever
@@ -468,7 +473,7 @@ available for this update.
The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
this version is often presented as "MSVC 14.28", and reported by cl.exe as
19.28.) Older versions will not be accepted by `configure` and will not work.
The maximum accepted version of Visual Studio is 2022.
The maximum accepted version of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by


@@ -1037,8 +1037,8 @@ running destructors at exit can lead to problems.</p>
<p>Some of the approaches used in HotSpot to avoid dynamic
initialization include:</p>
<ul>
<li><p>Use the <code>Deferred&lt;T&gt;</code> class template. Add a call
to its initialization function at an appropriate place during VM
<li><p>Use the <code>DeferredStatic&lt;T&gt;</code> class template. Add
a call to its initialization function at an appropriate place during VM
initialization. The underlying object is never destroyed.</p></li>
<li><p>For objects of class type, use a variable whose value is a
pointer to the class, initialized to <code>nullptr</code>. Provide an


@@ -954,7 +954,7 @@ destructors at exit can lead to problems.
Some of the approaches used in HotSpot to avoid dynamic initialization
include:
* Use the `Deferred<T>` class template. Add a call to its initialization
* Use the `DeferredStatic<T>` class template. Add a call to its initialization
function at an appropriate place during VM initialization. The underlying
object is never destroyed.
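
For readers new to this pattern, here is a minimal sketch of the idea (illustrative only; the holder and the `Counter` payload are hypothetical simplifications, not the actual HotSpot `DeferredStatic` implementation):

#include <cstdio>
#include <new>       // placement new
#include <utility>   // std::forward

// Deferred-static holder: storage is reserved statically (so no dynamic
// initializer runs at load time), the object is constructed by an explicit
// call during startup, and it is deliberately never destroyed.
template <typename T>
class DeferredStaticSketch {
  alignas(T) unsigned char _space[sizeof(T)];
 public:
  template <typename... Args>
  void initialize(Args&&... args) {
    ::new (static_cast<void*>(_space)) T(std::forward<Args>(args)...);
  }
  T* operator->() { return reinterpret_cast<T*>(_space); }
};

struct Counter {
  int value;
  explicit Counter(int v) : value(v) {}
};

DeferredStaticSketch<Counter> _counter;  // zero-initialized, no constructor runs

int main() {
  _counter.initialize(42);               // explicit, ordered initialization
  std::printf("%d\n", _counter->value);  // prints 42; no destructor at exit
}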


@@ -305,11 +305,11 @@ recognize your tests.</p>
the product.</p>
<ul>
<li><p>All unit tests for a class from <code>foo/bar/baz.cpp</code>
should be placed in <code>foo/bar/test_baz.cpp</code> in the
<code>hotspot/test/native/</code> directory. Having all tests for a
class in one file is a common practice for unit tests; it helps to see
all existing tests at once and share functions and/or resources without
losing encapsulation.</p></li>
should be placed in <code>foo/bar/test_baz.cpp</code> in the
<code>test/hotspot/gtest/</code> directory. Having all tests for a class
in one file is a common practice for unit tests; it helps to see all
existing tests at once and share functions and/or resources without
losing encapsulation.</p></li>
<li><p>For tests which test more than one class, directory hierarchy
should be the same as product hierarchy, and file name should reflect
the name of the tested subsystem/functionality. For example, if a
@@ -319,7 +319,7 @@ placed in <code>gc/g1</code> directory.</p></li>
<p>Please note that framework prepends directory name to a test group
name. For example, if <code>TEST(foo, check_this)</code> and
<code>TEST(bar, check_that)</code> are defined in
<code>hotspot/test/native/gc/shared/test_foo.cpp</code> file, they will
<code>test/hotspot/gtest/gc/shared/test_foo.cpp</code> file, they will
be reported as <code>gc/shared/foo::check_this</code> and
<code>gc/shared/bar::check_that</code>.</p>
<h3 id="test-names">Test names</h3>


@@ -241,7 +241,7 @@ recognize your tests.
Test file location should reflect a location of the tested part of the product.
* All unit tests for a class from `foo/bar/baz.cpp` should be placed in
`foo/bar/test_baz.cpp` in the `hotspot/test/native/` directory. Having all
`foo/bar/test_baz.cpp` in the `test/hotspot/gtest/` directory. Having all
tests for a class in one file is a common practice for unit tests; it
helps to see all existing tests at once and share functions and/or
resources without losing encapsulation.
@@ -254,7 +254,7 @@ sub-system under tests belongs to `gc/g1`, tests should be placed in
Please note that framework prepends directory name to a test group
name. For example, if `TEST(foo, check_this)` and `TEST(bar, check_that)`
are defined in `hotspot/test/native/gc/shared/test_foo.cpp` file, they
are defined in `test/hotspot/gtest/gc/shared/test_foo.cpp` file, they
will be reported as `gc/shared/foo::check_this` and
`gc/shared/bar::check_that`.
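
To make the naming rule concrete, a file like the following (hypothetical test bodies; `unittest.hpp` is the gtest wrapper header used by HotSpot's native tests) illustrates both the placement and the reported names:

// test/hotspot/gtest/gc/shared/test_foo.cpp
#include "unittest.hpp"

// Reported as gc/shared/foo::check_this: the framework prepends the
// directory part (gc/shared) to the test group name (foo).
TEST(foo, check_this) {
  ASSERT_EQ(4, 2 + 2);
}

// Reported as gc/shared/bar::check_that.
TEST(bar, check_that) {
  ASSERT_TRUE(1 < 2);
}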


@@ -119,6 +119,9 @@ cover the new source version</li>
and
<code>test/langtools/tools/javac/preview/classReaderTest/Client.preview.out</code>:
update expected messages for preview errors and warnings</li>
<li><code>test/langtools/tools/javac/versions/Versions.java</code>: add
new source version to the set of valid sources and add new enum constant
for the new class file version.</li>
</ul>
</body>
</html>


@@ -65,4 +65,4 @@ to be updated for a particular release.
* `test/langtools/tools/javac/lib/JavacTestingAbstractProcessor.java`
update annotation processor extended by `javac` tests to cover the new source version
* `test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out` and `test/langtools/tools/javac/preview/classReaderTest/Client.preview.out`: update expected messages for preview errors and warnings
* `test/langtools/tools/javac/versions/Versions.java`: add new source version to the set of valid sources and add new enum constant for the new class file version.


@@ -535,6 +535,8 @@ failure. This helps to reproduce intermittent test failures. Defaults to
<h4 id="report">REPORT</h4>
<p>Use this report style when reporting test results (sent to JTReg as
<code>-report</code>). Defaults to <code>files</code>.</p>
<h4 id="manual">MANUAL</h4>
<p>Set to <code>true</code> to execute manual tests only.</p>
<h3 id="gtest-keywords">Gtest keywords</h3>
<h4 id="repeat">REPEAT</h4>
<p>The number of times to repeat the tests


@@ -512,6 +512,10 @@ helps to reproduce intermittent test failures. Defaults to 0.
Use this report style when reporting test results (sent to JTReg as `-report`).
Defaults to `files`.
#### MANUAL
Set to `true` to execute manual tests only.
### Gtest keywords
#### REPEAT


@@ -125,13 +125,6 @@ define SetupBundleFileBody
&& $(TAR) cf - -$(TAR_INCLUDE_PARAM) $$($1_$$d_LIST_FILE) \
$(TAR_IGNORE_EXIT_VALUE) ) \
| ( $(CD) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) && $(TAR) xf - )$$(NEWLINE) )
# Rename stripped pdb files
ifeq ($(call isTargetOs, windows)+$(SHIP_DEBUG_SYMBOLS), true+public)
for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.stripped.pdb"`; do \
$(ECHO) Renaming $$$${f} to $$$${f%stripped.pdb}pdb $(LOG_INFO); \
$(MV) $$$${f} $$$${f%stripped.pdb}pdb; \
done
endif
# Unzip any zipped debuginfo files
ifeq ($$($1_UNZIP_DEBUGINFO), true)
for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.diz"`; do \
@@ -222,14 +215,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JDK_SYMBOLS_EXCLUDE_PATTERN := %.pdb
else
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
JDK_SYMBOLS_EXCLUDE_PATTERN := \
$(filter-out \
%.stripped.pdb, \
$(filter %.pdb, $(ALL_JDK_FILES)) \
)
endif
endif
endif
@@ -244,10 +229,7 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
)
JDK_SYMBOLS_BUNDLE_FILES := \
$(filter-out \
%.stripped.pdb, \
$(call FindFiles, $(SYMBOLS_IMAGE_DIR)) \
)
$(call FindFiles, $(SYMBOLS_IMAGE_DIR))
TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
$(ALL_JDK_DEMOS_FILES))
@@ -267,14 +249,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
ifeq ($(call isTargetOs, windows), true)
ifeq ($(SHIP_DEBUG_SYMBOLS), )
JRE_SYMBOLS_EXCLUDE_PATTERN := %.pdb
else
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
JRE_SYMBOLS_EXCLUDE_PATTERN := \
$(filter-out \
%.stripped.pdb, \
$(filter %.pdb, $(ALL_JRE_FILES)) \
)
endif
endif
endif


@@ -291,8 +291,7 @@ define SetupApiDocsGenerationBody
$1_INDIRECT_EXPORTS := $$(call FindTransitiveIndirectDepsForModules, $$($1_MODULES))
$1_ALL_MODULES := $$(sort $$($1_MODULES) $$($1_INDIRECT_EXPORTS))
$1_JAVA_ARGS := -Dextlink.spec.version=$$(VERSION_SPECIFICATION) \
-Djspec.version=$$(VERSION_SPECIFICATION)
$1_JAVA_ARGS := -Dextlink.spec.version=$$(VERSION_SPECIFICATION)
ifeq ($$(ENABLE_FULL_DOCS), true)
$1_SEALED_GRAPHS_DIR := $$(SUPPORT_OUTPUTDIR)/docs/$1-sealed-graphs


@@ -282,29 +282,33 @@ else
endif
CMDS_TARGET_SUBDIR := bin
# Param 1 - either JDK or JRE
# Copy debug info files into symbols bundle.
# In case of Windows and --with-external-symbols-in-bundles=public, take care to remove *.stripped.pdb files
SetupCopyDebuginfo = \
$(foreach m, $(ALL_$1_MODULES), \
$(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_libs/$m)) \
$(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
$(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
) \
$(eval $(call SetupCopyFiles, COPY_$1_LIBS_DEBUGINFO_$m, \
SRC := $(SUPPORT_OUTPUTDIR)/modules_libs/$m, \
DEST := $($1_IMAGE_DIR)/$(LIBS_TARGET_SUBDIR), \
FILES := $(call FindDebuginfoFiles, \
$(SUPPORT_OUTPUTDIR)/modules_libs/$m), \
FILES := $(dbgfiles), \
)) \
$(eval $1_TARGETS += $$(COPY_$1_LIBS_DEBUGINFO_$m)) \
$(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_cmds/$m)) \
$(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
$(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
) \
$(eval $(call SetupCopyFiles, COPY_$1_CMDS_DEBUGINFO_$m, \
SRC := $(SUPPORT_OUTPUTDIR)/modules_cmds/$m, \
DEST := $($1_IMAGE_DIR)/$(CMDS_TARGET_SUBDIR), \
FILES := $(call FindDebuginfoFiles, \
$(SUPPORT_OUTPUTDIR)/modules_cmds/$m), \
FILES := $(dbgfiles), \
)) \
$(eval $1_TARGETS += $$(COPY_$1_CMDS_DEBUGINFO_$m)) \
)
# No space before argument to avoid having to put $(strip ) everywhere in
# implementation above.
$(call SetupCopyDebuginfo,JDK)
$(call SetupCopyDebuginfo,JRE)
# No space before argument to avoid having to put $(strip ) everywhere in implementation above.
$(call SetupCopyDebuginfo,SYMBOLS)
################################################################################


@@ -206,7 +206,7 @@ $(eval $(call ParseKeywordVariable, JTREG, \
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \
TEST_MODE ASSERT VERBOSE RETAIN TEST_THREAD_FACTORY JVMTI_STRESS_AGENT \
MAX_MEM RUN_PROBLEM_LISTS RETRY_COUNT REPEAT_COUNT MAX_OUTPUT REPORT \
AOT_JDK $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
AOT_JDK MANUAL $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS \
$(CUSTOM_JTREG_STRING_KEYWORDS), \
@@ -873,7 +873,7 @@ define SetupRunJtregTestBody
$1_JTREG_BASIC_OPTIONS += -testThreadFactoryPath:$$(JTREG_TEST_THREAD_FACTORY_JAR)
$1_JTREG_BASIC_OPTIONS += -testThreadFactory:$$(JTREG_TEST_THREAD_FACTORY)
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
))
endif
@@ -881,8 +881,8 @@ define SetupRunJtregTestBody
AGENT := $$(LIBRARY_PREFIX)JvmtiStressAgent$$(SHARED_LIBRARY_SUFFIX)=$$(JTREG_JVMTI_STRESS_AGENT)
$1_JTREG_BASIC_OPTIONS += -javaoption:'-agentpath:$(TEST_IMAGE_DIR)/hotspot/jtreg/native/$$(AGENT)'
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
))
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
))
endif
@@ -911,7 +911,13 @@ define SetupRunJtregTestBody
-vmoption:-Dtest.boot.jdk="$$(BOOT_JDK)" \
-vmoption:-Djava.io.tmpdir="$$($1_TEST_TMP_DIR)"
$1_JTREG_BASIC_OPTIONS += -automatic -ignore:quiet
$1_JTREG_BASIC_OPTIONS += -ignore:quiet
ifeq ($$(JTREG_MANUAL), true)
$1_JTREG_BASIC_OPTIONS += -manual
else
$1_JTREG_BASIC_OPTIONS += -automatic
endif
# Make it possible to specify the JIB_DATA_DIR for tests using the
# JIB Artifact resolver
@@ -1086,7 +1092,7 @@ define SetupRunJtregTestBody
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
$$($1_TEST_TMP_DIR))
$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
$$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
$$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
)
$1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt
@@ -1096,11 +1102,11 @@ define SetupRunJtregTestBody
$$(call LogWarn, Test report is stored in $$(strip \
$$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
# Read the jtreg documentation to learn about the test stats categories:
# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
# In jtreg, the "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
# At the same time these tests contribute to "passed:" tests.
# Here we don't want that, so we subtract the number of "skipped:" from "passed:".
# Read the jtreg documentation to learn about the test stats categories:
# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
# In jtreg, the "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
# At the same time these tests contribute to "passed:" tests.
# Here we don't want that, so we subtract the number of "skipped:" from "passed:".
$$(if $$(wildcard $$($1_RESULT_FILE)), \
$$(eval $1_PASSED_AND_RUNTIME_SKIPPED := $$(shell $$(AWK) '{ gsub(/[,;]/, ""); \
@@ -1151,6 +1157,7 @@ define SetupRunJtregTestBody
$$(EXPR) $$($1_PASSED) + $$($1_FAILED) + $$($1_ERROR) + $$($1_SKIPPED))) \
, \
$$(eval $1_PASSED_AND_RUNTIME_SKIPPED := 0) \
$$(eval $1_PASSED := 0) \
$$(eval $1_RUNTIME_SKIPPED := 0) \
$$(eval $1_SKIPPED := 0) \
$$(eval $1_FAILED := 0) \


@@ -79,7 +79,7 @@ TOOL_GENERATEEXTRAPROPERTIES = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_too
build.tools.generateextraproperties.GenerateExtraProperties
TOOL_GENERATECASEFOLDING = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.generatecharacter.CaseFolding
build.tools.generatecharacter.GenerateCaseFolding
TOOL_MAKEZIPREPRODUCIBLE = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.makezipreproducible.MakeZipReproducible


@@ -353,7 +353,12 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[set up toolchain on Mac OS using a path to an Xcode installation])])
UTIL_DEPRECATED_ARG_WITH(sys-root)
UTIL_DEPRECATED_ARG_WITH(tools-dir)
AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
[Point to a nonstandard Visual Studio installation location on Windows by
specifying any existing directory 2 or 3 levels below the installation
root.])]
)
if test "x$with_xcode_path" != x; then
if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then


@@ -282,10 +282,17 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG_JVM="-O0"
C_O_FLAG_NONE="-O0"
if test "x$TOOLCHAIN_TYPE" = xgcc; then
C_O_FLAG_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing -fno-fat-lto-objects"
else
C_O_FLAG_LTO="-flto -fno-strict-aliasing"
fi
if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
C_O_FLAG_HIGHEST_JVM="${C_O_FLAG_HIGHEST_JVM} -finline-functions"
C_O_FLAG_HIGHEST="${C_O_FLAG_HIGHEST} -finline-functions"
C_O_FLAG_HI="${C_O_FLAG_HI} -finline-functions"
C_O_FLAG_LTO="${C_O_FLAG_LTO} -ffat-lto-objects"
fi
# -D_FORTIFY_SOURCE=2 hardening option needs optimization (at least -O1) enabled
@@ -317,6 +324,7 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG_JVM=""
C_O_FLAG_NONE="-Od"
C_O_FLAG_SIZE="-O1"
C_O_FLAG_LTO="-GL"
fi
# Now copy to C++ flags
@@ -328,6 +336,7 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
CXX_O_FLAG_DEBUG_JVM="$C_O_FLAG_DEBUG_JVM"
CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
CXX_O_FLAG_SIZE="$C_O_FLAG_SIZE"
CXX_O_FLAG_LTO="$C_O_FLAG_LTO"
# Adjust optimization flags according to debug level.
case $DEBUG_LEVEL in
@@ -360,12 +369,15 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
AC_SUBST(C_O_FLAG_NORM)
AC_SUBST(C_O_FLAG_NONE)
AC_SUBST(C_O_FLAG_SIZE)
AC_SUBST(C_O_FLAG_LTO)
AC_SUBST(CXX_O_FLAG_HIGHEST_JVM)
AC_SUBST(CXX_O_FLAG_HIGHEST)
AC_SUBST(CXX_O_FLAG_HI)
AC_SUBST(CXX_O_FLAG_NORM)
AC_SUBST(CXX_O_FLAG_NONE)
AC_SUBST(CXX_O_FLAG_SIZE)
AC_SUBST(CXX_O_FLAG_LTO)
])
AC_DEFUN([FLAGS_SETUP_CFLAGS],


@@ -34,7 +34,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS],
FLAGS_SETUP_LDFLAGS_CPU_DEP([TARGET])
# Setup the build toolchain
FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_])
FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_], [BUILD_])
AC_SUBST(ADLC_LDFLAGS)
])
@@ -50,7 +50,9 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# add -z,relro (mark relocations read only) for all libs
# add -z,now ("full relro" - more of the Global Offset Table GOT is marked read only)
# add --no-as-needed to disable default --as-needed link flag on some GCC toolchains
# add --icf=all (Identical Code Folding — merges identical functions)
BASIC_LDFLAGS="-Wl,-z,defs -Wl,-z,relro -Wl,-z,now -Wl,--no-as-needed -Wl,--exclude-libs,ALL"
# Linux : remove unused code+data in link step
if test "x$ENABLE_LINKTIME_GC" = xtrue; then
if test "x$OPENJDK_TARGET_CPU" = xs390x; then
@@ -61,6 +63,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
fi
BASIC_LDFLAGS_JVM_ONLY=""
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
@@ -68,6 +71,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
-fPIC"
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
if test "x$OPENJDK_TARGET_OS" = xlinux; then
@@ -87,6 +91,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS="-opt:ref"
BASIC_LDFLAGS_JDK_ONLY="-incremental:no"
BASIC_LDFLAGS_JVM_ONLY="-opt:icf,8 -subsystem:windows"
LDFLAGS_LTO="-LTCG:INCREMENTAL"
fi
if (test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang) \
@@ -98,6 +103,9 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# Setup OS-dependent LDFLAGS
if test "x$OPENJDK_TARGET_OS" = xmacosx && test "x$TOOLCHAIN_TYPE" = xclang; then
if test x$DEBUG_LEVEL = xrelease; then
BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,-dead_strip"
fi
# FIXME: We should really generalize SetSharedLibraryOrigin instead.
OS_LDFLAGS_JVM_ONLY="-Wl,-rpath,@loader_path/. -Wl,-rpath,@loader_path/.."
OS_LDFLAGS="-mmacosx-version-min=$MACOSX_VERSION_MIN -Wl,-reproducible"
@@ -148,6 +156,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# Export some intermediate variables for compatibility
LDFLAGS_CXX_JDK="$DEBUGLEVEL_LDFLAGS_JDK_ONLY"
AC_SUBST(LDFLAGS_LTO)
AC_SUBST(LDFLAGS_CXX_JDK)
AC_SUBST(LDFLAGS_CXX_PARTIAL_LINKING)
])
@@ -155,7 +164,8 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
################################################################################
# $1 - Either BUILD or TARGET to pick the correct OS/CPU variables to check
# conditionals against.
# $2 - Optional prefix for each variable defined.
# $2 - Optional prefix for each variable defined (OPENJDK_BUILD_ or nothing).
# $3 - Optional prefix for toolchain variables (BUILD_ or nothing).
AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
[
# Setup CPU-dependent basic LDFLAGS. These can differ between the target and
@@ -189,6 +199,12 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
fi
fi
if test "x${$3LD_TYPE}" = "xgold"; then
if test x$DEBUG_LEVEL = xrelease; then
$1_CPU_LDFLAGS="${$1_CPU_LDFLAGS} -Wl,--icf=all"
fi
fi
# Export variables according to old definitions, prefix with $2 if present.
LDFLAGS_JDK_COMMON="$BASIC_LDFLAGS $BASIC_LDFLAGS_JDK_ONLY \
$OS_LDFLAGS $DEBUGLEVEL_LDFLAGS_JDK_ONLY ${$2EXTRA_LDFLAGS}"


@@ -513,12 +513,14 @@ C_O_FLAG_HI := @C_O_FLAG_HI@
C_O_FLAG_NORM := @C_O_FLAG_NORM@
C_O_FLAG_NONE := @C_O_FLAG_NONE@
C_O_FLAG_SIZE := @C_O_FLAG_SIZE@
C_O_FLAG_LTO := @C_O_FLAG_LTO@
CXX_O_FLAG_HIGHEST_JVM := @CXX_O_FLAG_HIGHEST_JVM@
CXX_O_FLAG_HIGHEST := @CXX_O_FLAG_HIGHEST@
CXX_O_FLAG_HI := @CXX_O_FLAG_HI@
CXX_O_FLAG_NORM := @CXX_O_FLAG_NORM@
CXX_O_FLAG_NONE := @CXX_O_FLAG_NONE@
CXX_O_FLAG_SIZE := @CXX_O_FLAG_SIZE@
CXX_O_FLAG_LTO := @CXX_O_FLAG_LTO@
GENDEPS_FLAGS := @GENDEPS_FLAGS@
@@ -587,6 +589,9 @@ LDFLAGS_CXX_JDK := @LDFLAGS_CXX_JDK@
# LDFLAGS specific to partial linking.
LDFLAGS_CXX_PARTIAL_LINKING := @LDFLAGS_CXX_PARTIAL_LINKING@
# LDFLAGS specific to link time optimization
LDFLAGS_LTO := @LDFLAGS_LTO@
# Sometimes a different linker is needed for c++ libs
LDCXX := @LDCXX@
# The flags for linking libstdc++ linker.


@@ -516,6 +516,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_LD_VERSION],
if [ [[ "$LINKER_VERSION_STRING" == *gold* ]] ]; then
[ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
$SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*) .*/\1/'` ]
$1_TYPE=gold
else
[ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
$SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*/\1/'` ]


@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2022 2019"
VALID_VS_VERSIONS="2022 2019 2026"
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
VS_SUPPORTED_2022=true
VS_TOOLSET_SUPPORTED_2022=true
VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
VS_VERSION_INTERNAL_2026=145
VS_MSVCR_2026=vcruntime140.dll
VS_VCRUNTIME_1_2026=vcruntime140_1.dll
VS_MSVCP_2026=msvcp140.dll
VS_ENVVAR_2026="VS180COMNTOOLS"
VS_USE_UCRT_2026="true"
VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2026=
VS_VS_PLATFORM_NAME_2026="v145"
VS_SDK_PLATFORM_NAME_2026=
VS_SUPPORTED_2026=true
VS_TOOLSET_SUPPORTED_2026=true
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],


@@ -98,6 +98,7 @@ include native/Paths.gmk
# SYSROOT_CFLAGS the compiler flags for using the specific sysroot
# SYSROOT_LDFLAGS the linker flags for using the specific sysroot
# OPTIMIZATION sets optimization level to NONE, LOW, HIGH, HIGHEST, HIGHEST_JVM, SIZE
# LINK_TIME_OPTIMIZATION if set to true, enables link time optimization
# DISABLED_WARNINGS_<toolchain> Disable the given warnings for the specified toolchain
# DISABLED_WARNINGS_<toolchain>_<OS> Disable the given warnings for the specified
# toolchain and target OS


@@ -194,6 +194,11 @@ define SetupCompilerFlags
$1_EXTRA_CXXFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
endif
ifeq (true, $$($1_LINK_TIME_OPTIMIZATION))
$1_EXTRA_CFLAGS += $(C_O_FLAG_LTO)
$1_EXTRA_CXXFLAGS += $(CXX_O_FLAG_LTO)
endif
ifeq (NONE, $$($1_OPTIMIZATION))
$1_OPT_CFLAGS := $(C_O_FLAG_NONE)
$1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
@@ -222,6 +227,15 @@ define SetupLinkerFlags
# Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and TOOLCHAIN_TYPE
# dependent variables for LDFLAGS and LIBS, and additionally the pair dependent
# TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
ifeq ($$($1_LINK_TIME_OPTIMIZATION), true)
$1_EXTRA_LDFLAGS += $(LDFLAGS_LTO)
# Instruct the ld64 linker not to delete the temporary object file
# generated during Link Time Optimization
ifeq ($(call isTargetOs, macosx), true)
$1_EXTRA_LDFLAGS += -Wl,-object_path_lto,$$($1_OBJECT_DIR)/$$($1_NAME)_lto_helper.o
endif
endif
$1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
$$($1_LDFLAGS_$(TOOLCHAIN_TYPE)) $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
$1_EXTRA_LIBS += $$($1_LIBS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LIBS_$(OPENJDK_TARGET_OS)) \


@@ -26,17 +26,17 @@
# Default version, product, and vendor information to use,
# unless overridden by configure
DEFAULT_VERSION_FEATURE=26
DEFAULT_VERSION_FEATURE=27
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2026-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_DATE=2026-09-15
DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26"
DEFAULT_JDK_SOURCE_TARGET_VERSION=26
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27"
DEFAULT_JDK_SOURCE_TARGET_VERSION=27
DEFAULT_PROMOTED_VERSION_PRE=ea


@@ -170,6 +170,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ifeq ($(HOTSPOT_TARGET_CPU_ARCH), aarch64)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_vector.ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_atomic.ad \
)))
endif


@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
-DHOTSPOT_GTEST \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I, $(GTEST_TEST_SRC)), \


@@ -151,6 +151,12 @@ JVM_STRIPFLAGS ?= $(STRIPFLAGS)
# This source set is reused so save in cache.
$(call FillFindCache, $(JVM_SRC_DIRS))
ifeq ($(SHIP_DEBUG_SYMBOLS), full)
CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_FULL
else ifeq ($(SHIP_DEBUG_SYMBOLS), public)
CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_PUBLIC
endif
ifeq ($(call isTargetOs, windows), true)
ifeq ($(STATIC_LIBS), true)
WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/static-win-exports.def
@@ -158,10 +164,6 @@ ifeq ($(call isTargetOs, windows), true)
WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/win-exports.def
endif
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
CFLAGS_STRIPPED_DEBUGINFO := -DHAS_STRIPPED_DEBUGINFO
endif
JVM_LDFLAGS += -def:$(WIN_EXPORT_FILE)
endif
@@ -187,7 +189,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
CFLAGS := $(JVM_CFLAGS), \
abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
whitebox.cpp_CXXFLAGS := $(CFLAGS_STRIPPED_DEBUGINFO), \
whitebox.cpp_CXXFLAGS := $(CFLAGS_SHIP_DEBUGINFO), \
DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
DISABLED_WARNINGS_gcc_ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp := nonnull, \
DISABLED_WARNINGS_gcc_bytecodeInterpreter.cpp := unused-label, \
@@ -234,6 +236,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
LDFLAGS := $(JVM_LDFLAGS), \
LIBS := $(JVM_LIBS), \
OPTIMIZATION := $(JVM_OPTIMIZATION), \
LINK_TIME_OPTIMIZATION := $(JVM_LTO), \
OBJECT_DIR := $(JVM_OUTPUTDIR)/objs, \
STRIPFLAGS := $(JVM_STRIPFLAGS), \
EMBED_MANIFEST := true, \
@@ -337,6 +340,30 @@ TARGETS += $(BUILD_LIBJVM)
# for the associated class. If the class doesn't provide a more specific
# declaration (either directly or by inheriting from a class that provides
# one) then the global definition will be used, triggering this check.
#
# The HotSpot wrapper for <new> declares as deprecated all the allocation and
# deallocation functions that use the global allocator. But that blocking
# isn't bullet-proof. Some of these functions are implicitly available in
# every translation unit, without the need to include <new>. So even with that
# wrapper we still need this link-time check. The implicitly declared
# functions and their mangled names are - from C++17 6.7.4:
#
# void* operator new(size_t) // _Znwm
# void* operator new(size_t, align_val_t) // _ZnwmSt11align_val_t
#
# void operator delete(void*) noexcept // _ZdlPv
# void operator delete(void*, size_t) noexcept // _ZdlPvm
# void operator delete(void*, align_val_t) noexcept // _ZdlPvSt11align_val_t
# void operator delete(void*, size_t, align_val_t) noexcept // _ZdlPvmSt11align_val_t
#
# void* operator new[](size_t) // _Znam
# void* operator new[](size_t, align_val_t) // _ZnamSt11align_val_t
#
# void operator delete[](void*) noexcept // _ZdaPv
# void operator delete[](void*, size_t) noexcept // _ZdaPvm
# void operator delete[](void*, align_val_t) noexcept // _ZdaPvSt11align_val_t
# void operator delete[](void*, size_t, align_val_t) noexcept // _ZdaPvmSt11align_val_t
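
To make the loophole concrete: because these declarations are implicit, a translation unit can reach the global allocator without including <new> at all, so the wrapper's deprecation markers never fire and only the link-time symbol scan below catches it (illustrative snippet, not VM code):

// No #include <new> anywhere: the global allocation functions are
// implicitly declared in every translation unit (C++17 6.7.4).
struct Node { int value; };

Node* leak_into_global_heap() {
  return new Node{42};   // references _Znwm (operator new(size_t))
}

void drop(Node* n) {
  delete n;              // references _ZdlPv, or the sized variant _ZdlPvm
}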
ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
@@ -347,10 +374,18 @@ ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
# so use mangled names when looking for symbols.
# Save the demangling for when something is actually found.
MANGLED_SYMS := \
_ZdaPv \
_ZdlPv \
_Znam \
_Znwm \
_ZnwmSt11align_val_t \
_ZdlPv \
_ZdlPvm \
_ZdlPvSt11align_val_t \
_ZdlPvmSt11align_val_t \
_Znam \
_ZnamSt11align_val_t \
_ZdaPv \
_ZdaPvm \
_ZdaPvSt11align_val_t \
_ZdaPvmSt11align_val_t \
#
UNDEF_PATTERN := ' U '


@@ -175,22 +175,12 @@ ifeq ($(call check-jvm-feature, link-time-opt), true)
# Set JVM_OPTIMIZATION directly so other jvm-feature flags can override it
# later on if desired
JVM_OPTIMIZATION := HIGHEST_JVM
ifeq ($(call isCompiler, gcc), true)
JVM_CFLAGS_FEATURES += -flto=auto -fuse-linker-plugin -fno-strict-aliasing \
-fno-fat-lto-objects
JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto=auto \
-fuse-linker-plugin -fno-strict-aliasing
else ifeq ($(call isCompiler, clang), true)
JVM_CFLAGS_FEATURES += -flto -fno-strict-aliasing
ifeq ($(call isBuildOs, aix), true)
JVM_CFLAGS_FEATURES += -ffat-lto-objects
endif
JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto -fno-strict-aliasing
else ifeq ($(call isCompiler, microsoft), true)
JVM_CFLAGS_FEATURES += -GL
JVM_LDFLAGS_FEATURES += -LTCG:INCREMENTAL
JVM_LTO := true
ifneq ($(call isCompiler, microsoft), true)
JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM)
endif
else
JVM_LTO := false
ifeq ($(call isCompiler, gcc), true)
JVM_LDFLAGS_FEATURES += -O1
endif


@@ -1,73 +0,0 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package build.tools.generatecharacter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class CaseFolding {
public static void main(String[] args) throws Throwable {
if (args.length != 3) {
System.err.println("Usage: java CaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
System.exit(1);
}
var templateFile = Paths.get(args[0]);
var caseFoldingTxt = Paths.get(args[1]);
var genSrcFile = Paths.get(args[2]);
var supportedTypes = "^.*; [CTS]; .*$";
var caseFoldingEntries = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
.map(line -> {
String[] cols = line.split("; ");
return new String[] {cols[0], cols[1], cols[2]};
})
.filter(cols -> {
// the folding case doesn't map back to the original char.
var cp1 = Integer.parseInt(cols[0], 16);
var cp2 = Integer.parseInt(cols[2], 16);
return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
})
.map(cols -> String.format(" entry(0x%s, 0x%s)", cols[0], cols[2]))
.collect(Collectors.joining(",\n", "", ""));
// hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
// 0049; T; 0131; # LATIN CAPITAL LETTER I
final String T_0x0131_0x49 = String.format(" entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
// Generate .java file
Files.write(
genSrcFile,
Files.lines(templateFile)
.map(line -> line.contains("%%%Entries") ? T_0x0131_0x49 + caseFoldingEntries : line)
.collect(Collectors.toList()),
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
}


@@ -0,0 +1,134 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package build.tools.generatecharacter;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class GenerateCaseFolding {
public static void main(String[] args) throws Throwable {
if (args.length != 3) {
System.err.println("Usage: java GenerateCaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
System.exit(1);
}
var templateFile = Paths.get(args[0]);
var caseFoldingTxt = Paths.get(args[1]);
var genSrcFile = Paths.get(args[2]);
// java.lang
var supportedTypes = "^.*; [CF]; .*$"; // full/1:M case folding
String[][] caseFoldings = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
.map(line -> {
var fields = line.split("; ");
var cp = fields[0];
fields = fields[2].trim().split(" ");
var folding = new String[fields.length + 1];
folding[0] = cp;
System.arraycopy(fields, 0, folding, 1, fields.length);
return folding;
})
.toArray(size -> new String[size][]);
// util.regex
var expandedSupportedTypes = "^.*; [CTS]; .*$";
var expanded_caseFoldingEntries = Files.lines(caseFoldingTxt)
.filter(line -> !line.startsWith("#") && line.matches(expandedSupportedTypes))
.map(line -> {
String[] cols = line.split("; ");
return new String[]{cols[0], cols[1], cols[2]};
})
.filter(cols -> {
// the folding case doesn't map back to the original char.
var cp1 = Integer.parseInt(cols[0], 16);
var cp2 = Integer.parseInt(cols[2], 16);
return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
})
.map(cols -> String.format(" entry(0x%s, 0x%s)", cols[0], cols[2]))
.collect(Collectors.joining(",\n", "", ""));
// hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
// 0049; T; 0131; # LATIN CAPITAL LETTER I
final String T_0x0131_0x49 = String.format(" entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
Files.write(
genSrcFile,
Files.lines(templateFile)
.map(line -> line.contains("%%%Entries") ? genFoldingEntries(caseFoldings) : line)
.map(line -> line.contains("%%%Expanded_Case_Map_Entries") ? T_0x0131_0x49 + expanded_caseFoldingEntries : line)
.collect(Collectors.toList()),
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
private static long foldingToLong(String[] folding) {
int cp = Integer.parseInt(folding[0], 16);
long value = (long)Integer.parseInt(folding[1], 16);
if (!Character.isSupplementaryCodePoint(cp) && folding.length != 2) {
var shift = 16;
for (int j = 2; j < folding.length; j++) {
value |= (long)Integer.parseInt(folding[j], 16) << shift;
shift <<= 1;
}
value = value | (long) (folding.length - 1) << 48;
}
return value;
}
private static String genFoldingEntries(String[][] foldings) {
StringBuilder sb = new StringBuilder();
sb.append(" private static final int[] CASE_FOLDING_CPS = {\n");
int width = 10;
for (int i = 0; i < foldings.length; i++) {
if (i % width == 0)
sb.append(" ");
sb.append(String.format("0X%s", foldings[i][0]));
if (i < foldings.length - 1)
sb.append(", ");
if (i % width == width - 1 || i == foldings.length - 1)
sb.append("\n");
}
sb.append(" };\n\n");
sb.append(" private static final long[] CASE_FOLDING_VALUES = {\n");
width = 6;
for (int i = 0; i < foldings.length; i++) {
if (i % width == 0)
sb.append(" "); // indent
sb.append(String.format("0x%013xL", foldingToLong(foldings[i])));
if (i < foldings.length - 1)
sb.append(", ");
if (i % width == width - 1 || i == foldings.length - 1) {
sb.append("\n");
}
}
sb.append(" };\n");
return sb.toString();
}
}


@@ -33,6 +33,8 @@ import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.lang.model.element.Element;
import javax.lang.model.element.PackageElement;
import javax.lang.model.element.TypeElement;
import com.sun.source.doctree.DocTree;
import com.sun.source.doctree.LiteralTree;
@@ -44,7 +46,7 @@ import jdk.javadoc.doclet.Taglet;
import static com.sun.source.doctree.DocTree.Kind.*;
/**
* A base class for block tags to insert a link to an external copy of JLS or JVMS.
* A base class for block tags to insert a link to a local copy of JLS or JVMS.
* The tags can be used as follows:
*
* <pre>
@@ -57,30 +59,23 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* &commat;jls 3.4 Line Terminators
* </pre>
*
* will produce the following HTML for a docs build configured for Java SE 12.
* will produce the following HTML, depending on the file containing
* the tag.
*
* <pre>{@code
* <dt>See <i>Java Language Specification</i>:
* <dd><a href="https://docs.oracle.com/javase/specs/jls/se12/html/jls-3.html#jls-3.4">3.4 Line terminators</a>
* <dd><a href="../../specs/jls/jls-3.html#jls-3.4">3.4 Line terminators</a>
* }</pre>
*
* The version of the spec must be set in the jspec.version system property.
* Copies of JLS and JVMS are expected to have been placed in the {@code specs}
* folder. These documents are not included in open-source repositories.
*/
public class JSpec implements Taglet {
static final String SPEC_VERSION;
static {
SPEC_VERSION = System.getProperty("jspec.version");
if (SPEC_VERSION == null) {
throw new RuntimeException("jspec.version property not set");
}
}
public static class JLS extends JSpec {
public JLS() {
super("jls",
"Java Language Specification",
"https://docs.oracle.com/javase/specs/jls/se" + SPEC_VERSION + "/html",
"jls");
}
}
@@ -89,20 +84,17 @@ public class JSpec implements Taglet {
public JVMS() {
super("jvms",
"Java Virtual Machine Specification",
"https://docs.oracle.com/javase/specs/jvms/se" + SPEC_VERSION + "/html",
"jvms");
}
}
private String tagName;
private String specTitle;
private String baseURL;
private String idPrefix;
JSpec(String tagName, String specTitle, String baseURL, String idPrefix) {
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
this.specTitle = specTitle;
this.baseURL = baseURL;
this.idPrefix = idPrefix;
}
@@ -169,8 +161,8 @@ String chapter = m.group("chapter");
String chapter = m.group("chapter");
String section = m.group("section");
String url = String.format("%1$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
baseURL, idPrefix, chapter, section);
String url = String.format("%1$s/../specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
docRoot(elem), idPrefix, chapter, section);
sb.append("<a href=\"")
.append(url)
@@ -216,4 +208,35 @@ }
}
}).visit(trees, new StringBuilder()).toString();
}
private String docRoot(Element elem) {
switch (elem.getKind()) {
case MODULE:
return "..";
case PACKAGE:
PackageElement pe = (PackageElement)elem;
String pkgPart = pe.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");
return pe.getEnclosingElement() != null
? "../" + pkgPart
: pkgPart;
case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
TypeElement te = (TypeElement)elem;
return te.getQualifiedName()
.toString()
.replace('.', '/')
.replaceAll("[^/]+", "..");
default:
var enclosing = elem.getEnclosingElement();
if (enclosing == null)
throw new IllegalArgumentException(elem.getKind().toString());
return docRoot(enclosing);
}
}
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ public final class SealedGraph implements Taglet {
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
var rootPackage = util.getPackageOf(rootNode);
var nodePackage = util.getPackageOf(node);
var backNavigator = rootPackage.getQualifiedName().toString().chars()
// Note: SVG files for nested types use the simple names of containing types as parent directories.
// We therefore need to convert all dots in the qualified name to "../" below.
var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
.collect(joining()) +
"../";
.collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");


@@ -156,7 +156,8 @@ public class ToolGuide implements Taglet {
return pe.getEnclosingElement() != null
? "../" + pkgPart
: pkgPart;
case CLASS:
case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
TypeElement te = (TypeElement)elem;
return te.getQualifiedName()
.toString()
@@ -164,7 +165,10 @@
.replaceAll("[^/]+", "..");
default:
throw new IllegalArgumentException(elem.getKind().toString());
var enclosing = elem.getEnclosingElement();
if (enclosing == null)
throw new IllegalArgumentException(elem.getKind().toString());
return docRoot(enclosing);
}
}
}


@@ -84,6 +84,7 @@ public interface MessageType {
FILE_OBJECT("file object", "JavaFileObject", "javax.tools"),
PATH("path", "Path", "java.nio.file"),
NAME("name", "Name", "com.sun.tools.javac.util"),
LONG("long", "long", null),
NUMBER("number", "int", null),
OPTION_NAME("option name", "Option", "com.sun.tools.javac.main"),
PROFILE("profile", "Profile", "com.sun.tools.javac.jvm"),


@@ -120,3 +120,25 @@ $(INTPOLY_GEN_DONE): $(INTPLOY_HEADER) $(BUILD_TOOLS_JDK)
TARGETS += $(INTPOLY_GEN_DONE)
################################################################################
RELEASE_FILE_TEMPLATE := $(TOPDIR)/src/java.base/share/classes/jdk/internal/misc/resources/release.txt.template
RELEASE_FILE_TARGET := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jdk/internal/misc/resources/release.txt
RELEASE_FILE_VARDEPS := $(COMPANY_NAME) $(VERSION_STRING) $(VERSION_DATE)
RELEASE_FILE_VARDEPS_FILE := $(call DependOnVariable, RELEASE_FILE_VARDEPS, \
$(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jlink_release_txt.vardeps)
$(eval $(call SetupTextFileProcessing, BUILD_RELEASE_FILE, \
SOURCE_FILES := $(RELEASE_FILE_TEMPLATE), \
OUTPUT_FILE := $(RELEASE_FILE_TARGET), \
REPLACEMENTS := \
@@COMPANY_NAME@@ => $(COMPANY_NAME) ; \
@@VERSION_STRING@@ => $(VERSION_STRING) ; \
@@VERSION_DATE@@ => $(VERSION_DATE) , \
))
$(BUILD_RELEASE_FILE): $(RELEASE_FILE_VARDEPS_FILE)
TARGETS += $(BUILD_RELEASE_FILE)
################################################################################


@@ -34,7 +34,7 @@
DOCLINT += -Xdoclint:all/protected \
'-Xdoclint/package:java.*,javax.*'
JAVAC_FLAGS += -XDstringConcat=inline
COPY += .icu .dat .spp .nrm content-types.properties \
COPY += .icu .dat .spp .nrm .txt content-types.properties \
hijrah-config-Hijrah-umalqura_islamic-umalqura.properties
CLEAN += intrinsic.properties


@@ -72,5 +72,22 @@ TARGETS += $(GENSRC_CHARACTERDATA)
################################################################################
GENSRC_STRINGCASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/lang/CaseFolding.java
STRINGCASEFOLDING_TEMPLATE := $(MODULE_SRC)/share/classes/jdk/internal/lang/CaseFolding.java.template
CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
$(GENSRC_STRINGCASEFOLDING): $(BUILD_TOOLS_JDK) $(STRINGCASEFOLDING_TEMPLATE) $(CASEFOLDINGTXT)
$(call LogInfo, Generating $@)
$(call MakeTargetDir)
$(TOOL_GENERATECASEFOLDING) \
$(STRINGCASEFOLDING_TEMPLATE) \
$(CASEFOLDINGTXT) \
$(GENSRC_STRINGCASEFOLDING)
TARGETS += $(GENSRC_STRINGCASEFOLDING)
endif # include guard
include MakeIncludeEnd.gmk


@@ -50,22 +50,5 @@ TARGETS += $(GENSRC_INDICCONJUNCTBREAK)
################################################################################
GENSRC_CASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/util/regex/CaseFolding.java
CASEFOLDINGTEMP := $(MODULE_SRC)/share/classes/jdk/internal/util/regex/CaseFolding.java.template
CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
$(GENSRC_CASEFOLDING): $(BUILD_TOOLS_JDK) $(CASEFOLDINGTEMP) $(CASEFOLDINGTXT)
$(call LogInfo, Generating $@)
$(call MakeTargetDir)
$(TOOL_GENERATECASEFOLDING) \
$(CASEFOLDINGTEMP) \
$(CASEFOLDINGTXT) \
$(GENSRC_CASEFOLDING)
TARGETS += $(GENSRC_CASEFOLDING)
################################################################################
endif # include guard
include MakeIncludeEnd.gmk


@@ -164,6 +164,24 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
ifeq ($(USE_EXTERNAL_LIBPNG), false)
LIBSPLASHSCREEN_HEADER_DIRS += libsplashscreen/libpng
LIBSPLASHSCREEN_CFLAGS += -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0
-DPNG_ARM_NEON_IMPLEMENTATION=0 -DPNG_LOONGARCH_LSX_OPT=0
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
# The libpng bundled with jdk is a reduced version which does not
# contain .png_init_filter_functions_vsx.
# Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
# it to 0. If this define is not set, it would be automatically set to 2,
# because
# "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
# expands to true. This would result in the fact that
# .png_init_filter_functions_vsx is needed in libpng.
ifeq ($(call isTargetOs, aix), true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
else
LIBSPLASHSCREEN_EXCLUDES += libpng
endif
@@ -176,25 +194,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
LIBSPLASHSCREEN_STATIC_LIB_EXCLUDE_OBJS += $(LIBZIP_OBJS)
endif
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE \
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0 \
-DPNG_LOONGARCH_LSX_OPT=0
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
# The external libpng submitted in the jdk is a reduced version
# which does not contain .png_init_filter_functions_vsx.
# Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
# it to 0. If this define is not set, it would be automatically set to 2,
# because
# "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
# expands to true. This would result in the fact that
# .png_init_filter_functions_vsx is needed in libpng.
ifeq ($(call isTargetOs, aix), true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
endif
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN
ifeq ($(call isTargetOs, macosx), true)
# libsplashscreen on macosx does not use the unix code
@@ -226,6 +226,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
EXCLUDE_FILES := imageioJPEG.c jpegdecoder.c pngtest.c, \
EXCLUDES := $(LIBSPLASHSCREEN_EXCLUDES), \
OPTIMIZATION := SIZE, \
LINK_TIME_OPTIMIZATION := true, \
CFLAGS := $(LIBSPLASHSCREEN_CFLAGS) \
$(GIFLIB_CFLAGS) $(LIBJPEG_CFLAGS) $(PNG_CFLAGS) $(LIBZ_CFLAGS) \
$(ICONV_CFLAGS), \
@@ -236,7 +237,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
DISABLED_WARNINGS_gcc_dgif_lib.c := sign-compare, \
DISABLED_WARNINGS_gcc_jcmaster.c := implicit-fallthrough, \
DISABLED_WARNINGS_gcc_jdphuff.c := shift-negative-value, \
DISABLED_WARNINGS_gcc_png.c := maybe-uninitialized unused-function, \
DISABLED_WARNINGS_gcc_png.c := maybe-uninitialized, \
DISABLED_WARNINGS_gcc_pngerror.c := maybe-uninitialized, \
DISABLED_WARNINGS_gcc_splashscreen_gfx_impl.c := implicit-fallthrough \
maybe-uninitialized, \
@@ -247,7 +248,6 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
DISABLED_WARNINGS_clang := deprecated-non-prototype, \
DISABLED_WARNINGS_clang_dgif_lib.c := sign-compare, \
DISABLED_WARNINGS_clang_gzwrite.c := format-nonliteral, \
DISABLED_WARNINGS_clang_png.c := unused-function, \
DISABLED_WARNINGS_clang_splashscreen_impl.c := sign-compare \
unused-but-set-variable unused-function, \
DISABLED_WARNINGS_clang_splashscreen_png.c := \

View File

@@ -80,6 +80,7 @@ else
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libJNIAttachMutator := -pthread
BUILD_JDK_JTREG_EXCLUDE += exerevokeall.c
ifeq ($(call isTargetOs, linux), true)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exelauncher := -ldl

File diff suppressed because it is too large


@@ -0,0 +1,909 @@
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// BEGIN This file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from aarch64_atomic_ad.m4
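
For intuition about the ordering requirement described in the comment above, the volatile Java CAS that these patterns implement behaves like a sequentially consistent compare-exchange; in portable C++ terms the intended effect is comparable to the following (an analogy only, not what the VM emits):

#include <atomic>

// The volatile case must be sequentially consistent, and C2 emits no
// trailing StoreLoad barrier after the CAS, so the patterns below always
// use a releasing store-exclusive (STLXR) to get an effect like:
bool cas_seq_cst(std::atomic<long>& cell, long expected, long desired) {
  return cell.compare_exchange_strong(expected, desired,
                                      std::memory_order_seq_cst);
}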
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_weak $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_weak $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_weak $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_weak $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_weak $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(2*VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_weak $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq_weak $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq_weak $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq_weak $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq_weak $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq_weak $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq_weak $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
instruct getAndSetI(indirect mem, iRegI newval, iRegINoSp oldval) %{
match(Set oldval (GetAndSetI mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchgw $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetL(indirect mem, iRegL newval, iRegLNoSp oldval) %{
match(Set oldval (GetAndSetL mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchg $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetN(indirect mem, iRegN newval, iRegNNoSp oldval) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetN mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchgw $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetP(indirect mem, iRegP newval, iRegPNoSp oldval) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetP mem newval));
ins_cost(2*VOLATILE_REF_COST);
format %{ "atomic_xchg $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetIAcq(indirect mem, iRegI newval, iRegINoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set oldval (GetAndSetI mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetLAcq(indirect mem, iRegL newval, iRegLNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set oldval (GetAndSetL mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetNAcq(indirect mem, iRegN newval, iRegNNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (GetAndSetN mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp oldval) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set oldval (GetAndSetP mem newval));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddINoRes(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addI noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqNoRes(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIConst(indirect mem, iRegINoSp newval, immIAddSub incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqConst(indirect mem, iRegINoSp newval, immIAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddINoResConst(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addI noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddIAcqNoResConst(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddL(indirect mem, iRegLNoSp newval, iRegL incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLNoRes(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addL noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqNoRes(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLConst(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST+1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqConst(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST+1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLNoResConst(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2*VOLATILE_REF_COST);
format %{ "get_and_addL noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct getAndAddLAcqNoResConst(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq noreg, [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}


@ -0,0 +1,246 @@
// Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// BEGIN This file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from aarch64_atomic_ad.m4
dnl Return Arg1 with two spaces before it. We need this because m4
dnl strips leading spaces from macro args.
define(`INDENT', ` $1')dnl
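dnl For illustration: INDENT(predicate(needs_acquiring_load_exclusive(n));)
dnl emits the predicate line with its two-space indent restored, as seen in
dnl the generated aarch64_atomic.ad above.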
dnl
dnl
dnl
dnl ====================== CompareAndExchange*
dnl
define(`CAE_INSN1',
`
instruct compareAndExchange$1$7(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($7,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($7,Acq,,2*)VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($7,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($7,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
__ $6($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
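dnl As a worked example (not part of the generated output): the invocation
dnl CAE_INSN1(B, I, byte, byte, b, sxtbw, Acq) below expands to the
dnl compareAndExchangeBAcq instruct in the generated aarch64_atomic.ad, with
dnl ins_cost(VOLATILE_REF_COST), /*acquire*/ true, and a trailing sxtbw to
dnl sign-extend the byte result.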
define(`CAE_INSN2',
`
instruct compareAndExchange$1$6(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
CAE_INSN1(B, I, byte, byte, b, sxtbw, )
CAE_INSN1(S, I, short, halfword, s, sxthw, )
CAE_INSN2(I, I, int, word, w, , )
CAE_INSN2(L, L, long, xword, , , )
CAE_INSN2(N, N, narrow oop, word, w, , )
CAE_INSN2(P, P, ptr, xword, , , )
dnl
CAE_INSN1(B, I, byte, byte, b, sxtbw, Acq)
CAE_INSN1(S, I, short, halfword, s, sxthw, Acq)
CAE_INSN2(I, I, int, word, w, Acq)
CAE_INSN2(L, L, long, xword, , Acq)
CAE_INSN2(N, N, narrow oop, word, w, Acq)
CAE_INSN2(P, P, ptr, xword, , Acq)
dnl
dnl
dnl
dnl ====================== (Weak)CompareAndSwap*
dnl
define(`CAS_INSN1',
`
instruct ifelse($7,Weak,'weakCompare`,'compare`)AndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res ($7CompareAndSwap$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,)`'ifelse($7,Weak,_weak) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ ifelse($7,Weak,true,false), noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
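dnl Note: unlike the exchange macros above, these swap macros discard the
dnl witness value (noreg) and materialize a boolean success flag via csetw,
dnl matching the generated compareAndSwap*/weakCompareAndSwap* rules.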
dnl
define(`CAS_INSN2',
`
instruct ifelse($7,Weak,'weakCompare`,'compare`)AndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res ($7CompareAndSwap$1 mem (Binary oldval newval)));
ins_cost(`'ifelse($6,Acq,,2*)VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,)`'ifelse($7,Weak,_weak) $res = $mem, $oldval, $newval\t# ($3) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ ifelse($7,Weak,true,false), noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
CAS_INSN1(B, I, byte, byte, b, , )
CAS_INSN1(S, I, short, halfword, s, , )
CAS_INSN2(I, I, int, word, w, , )
CAS_INSN2(L, L, long, xword, , , )
CAS_INSN2(N, N, narrow oop, word, w, , )
CAS_INSN2(P, P, ptr, xword, , , )
dnl
CAS_INSN1(B, I, byte, byte, b, Acq, )
CAS_INSN1(S, I, short, halfword, s, Acq, )
CAS_INSN2(I, I, int, word, w, Acq, )
CAS_INSN2(L, L, long, xword, , Acq, )
CAS_INSN2(N, N, narrow oop, word, w, Acq, )
CAS_INSN2(P, P, ptr, xword, , Acq, )
dnl
CAS_INSN1(B, I, byte, byte, b, , Weak)
CAS_INSN1(S, I, short, halfword, s, , Weak)
CAS_INSN2(I, I, int, word, w, , Weak)
CAS_INSN2(L, L, long, xword, , , Weak)
CAS_INSN2(N, N, narrow oop, word, w, , Weak)
CAS_INSN2(P, P, ptr, xword, , , Weak)
dnl
CAS_INSN1(B, I, byte, byte, b, Acq, Weak)
CAS_INSN1(S, I, short, halfword, s, Acq, Weak)
CAS_INSN2(I, I, int, word, w, Acq, Weak)
CAS_INSN2(L, L, long, xword, , Acq, Weak)
CAS_INSN2(N, N, narrow oop, word, w, Acq, Weak)
CAS_INSN2(P, P, ptr, xword, , Acq, Weak)
dnl
dnl
dnl
dnl ====================== GetAndSet*
dnl
define(`GAS_INSN1',
`
instruct getAndSet$1$3(indirect mem, iReg$1 newval, iReg$1NoSp oldval) %{
ifelse($1$3,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$3,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$3,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set oldval (GetAndSet$1 mem newval));
ins_cost(`'ifelse($3,Acq,,2*)VOLATILE_REF_COST);
format %{ "atomic_xchg$2`'ifelse($3,Acq,_acq) $oldval, $newval, [$mem]" %}
ins_encode %{
__ atomic_xchg`'ifelse($3,Acq,al)$2($oldval$$Register, $newval$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}')dnl
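dnl E.g. GAS_INSN1(L, , Acq) below expands to the getAndSetLAcq instruct in
dnl the generated file, selecting atomic_xchgal and the
dnl needs_acquiring_load_exclusive predicate.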
dnl
GAS_INSN1(I, w, )
GAS_INSN1(L, , )
GAS_INSN1(N, w, )
GAS_INSN1(P, , )
dnl
GAS_INSN1(I, w, Acq)
GAS_INSN1(L, , Acq)
GAS_INSN1(N, w, Acq)
GAS_INSN1(P, , Acq)
dnl
dnl
dnl
dnl ====================== GetAndAdd*
dnl
define(`GAA_INSN1',
`
instruct getAndAdd$1$4$5$6(indirect mem, `'ifelse($5,NoRes,Universe dummy,iReg$1NoSp newval), `'ifelse($6,Const,imm$1AddSub incr,iReg$2 incr)) %{
ifelse($4$5,AcqNoRes,INDENT(predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));),
$5,NoRes,INDENT(predicate(n->as_LoadStore()->result_not_used());),
$4,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set ifelse($5,NoRes,dummy,newval) (GetAndAdd$1 mem incr));
ins_cost(`'ifelse($4,Acq,,2*)VOLATILE_REF_COST`'ifelse($5,NoRes,,+1));
format %{ "get_and_add$1`'ifelse($4,Acq,_acq) `'ifelse($5,NoRes,noreg,$newval), [$mem], $incr" %}
ins_encode %{
__ atomic_add`'ifelse($4,Acq,al)$3(`'ifelse($5,NoRes,noreg,$newval$$Register), `'ifelse($6,Const,$incr$$constant,$incr$$Register), as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}')dnl
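dnl For example, GAA_INSN1(I, IorL2I, w, Acq, NoRes, ) below expands to the
dnl getAndAddIAcqNoRes instruct in the generated file: NoRes swaps the result
dnl register for a Universe dummy and drops the +1 from ins_cost, while Acq
dnl selects atomic_addalw and adds needs_acquiring_load_exclusive to the
dnl predicate.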
dnl
dnl
GAA_INSN1(I, IorL2I, w, , , )
GAA_INSN1(I, IorL2I, w, Acq, , )
GAA_INSN1(I, IorL2I, w, , NoRes, )
GAA_INSN1(I, IorL2I, w, Acq, NoRes, )
GAA_INSN1(I, I, w, , , Const)
GAA_INSN1(I, I, w, Acq, , Const)
GAA_INSN1(I, I, w, , NoRes, Const)
GAA_INSN1(I, I, w, Acq, NoRes, Const)
dnl
GAA_INSN1(L, L, , , , )
GAA_INSN1(L, L, , Acq, , )
GAA_INSN1(L, L, , , NoRes, )
GAA_INSN1(L, L, , Acq, NoRes, )
GAA_INSN1(L, L, , , , Const)
GAA_INSN1(L, L, , Acq, , Const)
GAA_INSN1(L, L, , , NoRes, Const)
GAA_INSN1(L, L, , Acq, NoRes, Const)
dnl


@ -393,6 +393,32 @@ source %{
return false;
}
bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
// Only SVE supports the predicate feature.
if (UseSVE == 0) {
// On architectures that do not support the predicate feature, masks are
// stored in general vector registers (TypeVect) with sizes ranging from
// TypeVectA to TypeVectX based on the vector size in bytes.
assert(vt->isa_vectmask() == nullptr, "mask type is not matched");
return false;
}
assert(vt->isa_vectmask() != nullptr, "expected TypeVectMask on SVE");
switch (opcode) {
case Op_VectorMaskToLong:
case Op_VectorLongToMask:
// These operations lack native SVE predicate instructions and are
// implemented using general vector instructions instead. Use vector
// registers rather than predicate registers to save the mask for
// better performance.
return false;
default:
// By default, the mask operations are implemented with predicate
// instructions with a predicate input/output.
return true;
}
}
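// For instance, this is why the vmask_tolong_sve and vmask_fromlong rules
// below take their mask operands in a vReg rather than a pReg: these two
// opcodes return false here and keep the mask in a general vector register
// even when SVE predicates are available.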
// Assert that the given node is not a variable shift.
bool assert_not_var_shift(const Node* n) {
assert(!n->as_ShiftV()->is_var_shift(), "illegal variable shift");
@ -6249,31 +6275,44 @@ instruct vmask_tolong_neon(iRegLNoSp dst, vReg src) %{
ins_pipe(pipe_slow);
%}
instruct vmask_tolong_sve(iRegLNoSp dst, pReg src, vReg tmp1, vReg tmp2) %{
predicate(UseSVE > 0);
instruct vmask_tolong_sve(iRegLNoSp dst, vReg src, vReg tmp) %{
predicate(UseSVE > 0 && !VM_Version::supports_svebitperm());
match(Set dst (VectorMaskToLong src));
effect(TEMP tmp);
format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp" %}
ins_encode %{
// Input "src" is a vector of boolean represented as
// bytes with 0x00/0x01 as element values.
__ sve_vmask_tolong($dst$$Register, $src$$FloatRegister,
$tmp$$FloatRegister, Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
%}
instruct vmask_tolong_sve2(iRegLNoSp dst, vReg src, vReg tmp1, vReg tmp2) %{
predicate(VM_Version::supports_svebitperm());
match(Set dst (VectorMaskToLong src));
effect(TEMP tmp1, TEMP tmp2);
format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp1, $tmp2" %}
format %{ "vmask_tolong_sve2 $dst, $src\t# KILL $tmp1, $tmp2" %}
ins_encode %{
__ sve_vmask_tolong($dst$$Register, $src$$PRegister,
Matcher::vector_element_basic_type(this, $src),
Matcher::vector_length(this, $src),
$tmp1$$FloatRegister, $tmp2$$FloatRegister);
// Input "src" is a vector of boolean represented as
// bytes with 0x00/0x01 as element values.
__ sve2_vmask_tolong($dst$$Register, $src$$FloatRegister,
$tmp1$$FloatRegister, $tmp2$$FloatRegister,
Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
%}
// fromlong
instruct vmask_fromlong(pReg dst, iRegL src, vReg tmp1, vReg tmp2) %{
instruct vmask_fromlong(vReg dst, iRegL src, vReg tmp) %{
match(Set dst (VectorLongToMask src));
effect(TEMP tmp1, TEMP tmp2);
format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp1, $tmp2" %}
effect(TEMP_DEF dst, TEMP tmp);
format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp" %}
ins_encode %{
__ sve_vmask_fromlong($dst$$PRegister, $src$$Register,
Matcher::vector_element_basic_type(this),
Matcher::vector_length(this),
$tmp1$$FloatRegister, $tmp2$$FloatRegister);
__ sve_vmask_fromlong($dst$$FloatRegister, $src$$Register,
$tmp$$FloatRegister, Matcher::vector_length(this));
%}
ins_pipe(pipe_slow);
%}


@ -383,6 +383,32 @@ source %{
return false;
}
bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
// Only SVE supports the predicate feature.
if (UseSVE == 0) {
// On architectures that do not support the predicate feature, masks are
// stored in general vector registers (TypeVect) with sizes ranging from
// TypeVectA to TypeVectX based on the vector size in bytes.
assert(vt->isa_vectmask() == nullptr, "mask type is not matched");
return false;
}
assert(vt->isa_vectmask() != nullptr, "expected TypeVectMask on SVE");
switch (opcode) {
case Op_VectorMaskToLong:
case Op_VectorLongToMask:
// These operations lack native SVE predicate instructions and are
// implemented using general vector instructions instead. Use vector
// registers rather than predicate registers to save the mask for
// better performance.
return false;
default:
// By default, the mask operations are implemented with predicate
// instructions with a predicate input/output.
return true;
}
}
// Assert that the given node is not a variable shift.
bool assert_not_var_shift(const Node* n) {
assert(!n->as_ShiftV()->is_var_shift(), "illegal variable shift");
@ -4303,31 +4329,44 @@ instruct vmask_tolong_neon(iRegLNoSp dst, vReg src) %{
ins_pipe(pipe_slow);
%}
instruct vmask_tolong_sve(iRegLNoSp dst, pReg src, vReg tmp1, vReg tmp2) %{
predicate(UseSVE > 0);
instruct vmask_tolong_sve(iRegLNoSp dst, vReg src, vReg tmp) %{
predicate(UseSVE > 0 && !VM_Version::supports_svebitperm());
match(Set dst (VectorMaskToLong src));
effect(TEMP tmp);
format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp" %}
ins_encode %{
// Input "src" is a vector of boolean represented as
// bytes with 0x00/0x01 as element values.
__ sve_vmask_tolong($dst$$Register, $src$$FloatRegister,
$tmp$$FloatRegister, Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
%}
instruct vmask_tolong_sve2(iRegLNoSp dst, vReg src, vReg tmp1, vReg tmp2) %{
predicate(VM_Version::supports_svebitperm());
match(Set dst (VectorMaskToLong src));
effect(TEMP tmp1, TEMP tmp2);
format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp1, $tmp2" %}
format %{ "vmask_tolong_sve2 $dst, $src\t# KILL $tmp1, $tmp2" %}
ins_encode %{
__ sve_vmask_tolong($dst$$Register, $src$$PRegister,
Matcher::vector_element_basic_type(this, $src),
Matcher::vector_length(this, $src),
$tmp1$$FloatRegister, $tmp2$$FloatRegister);
// Input "src" is a vector of boolean represented as
// bytes with 0x00/0x01 as element values.
__ sve2_vmask_tolong($dst$$Register, $src$$FloatRegister,
$tmp1$$FloatRegister, $tmp2$$FloatRegister,
Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
%}
// fromlong
instruct vmask_fromlong(pReg dst, iRegL src, vReg tmp1, vReg tmp2) %{
instruct vmask_fromlong(vReg dst, iRegL src, vReg tmp) %{
match(Set dst (VectorLongToMask src));
effect(TEMP tmp1, TEMP tmp2);
format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp1, $tmp2" %}
effect(TEMP_DEF dst, TEMP tmp);
format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp" %}
ins_encode %{
__ sve_vmask_fromlong($dst$$PRegister, $src$$Register,
Matcher::vector_element_basic_type(this),
Matcher::vector_length(this),
$tmp1$$FloatRegister, $tmp2$$FloatRegister);
__ sve_vmask_fromlong($dst$$FloatRegister, $src$$Register,
$tmp$$FloatRegister, Matcher::vector_length(this));
%}
ins_pipe(pipe_slow);
%}


@ -449,12 +449,20 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
__ adr(lr, pc());
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ b(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {


@ -71,7 +71,7 @@ friend class ArrayCopyStub;
// CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
_deopt_handler_size = 4 * NativeInstruction::instruction_size
};
public:


@ -1399,137 +1399,125 @@ void C2_MacroAssembler::bytemask_compress(Register dst) {
andr(dst, dst, 0xff); // dst = 0x8D
}
// Pack the lowest-numbered bit of each mask element in src into a long value
// in dst, at most the first 64 lane elements.
// Clobbers: rscratch1, if UseSVE=1 or the hardware doesn't support FEAT_BITPERM.
void C2_MacroAssembler::sve_vmask_tolong(Register dst, PRegister src, BasicType bt, int lane_cnt,
FloatRegister vtmp1, FloatRegister vtmp2) {
// Pack the value of each mask element in "src" into a long value in "dst", at most
// the first 64 lane elements. The input "src" is a vector of boolean represented as
// bytes with 0x00/0x01 as element values. Each lane value from "src" is packed into
// one bit in "dst".
//
// Example: src = 0x0001010000010001 0100000001010001, lane_cnt = 16
// Expected: dst = 0x658D
//
// Clobbers: rscratch1
void C2_MacroAssembler::sve_vmask_tolong(Register dst, FloatRegister src,
FloatRegister vtmp, int lane_cnt) {
assert(lane_cnt <= 64 && is_power_of_2(lane_cnt), "Unsupported lane count");
assert_different_registers(dst, rscratch1);
assert_different_registers(vtmp1, vtmp2);
assert_different_registers(src, vtmp);
assert(UseSVE > 0, "must be");
Assembler::SIMD_RegVariant size = elemType_to_regVariant(bt);
// Example: src = 0b01100101 10001101, bt = T_BYTE, lane_cnt = 16
// Expected: dst = 0x658D
// Compress the lowest 8 bytes.
fmovd(dst, src);
bytemask_compress(dst);
if (lane_cnt <= 8) return;
// Convert the mask into vector with sequential bytes.
// vtmp1 = 0x00010100 0x00010001 0x01000000 0x01010001
sve_cpy(vtmp1, size, src, 1, false);
if (bt != T_BYTE) {
sve_vector_narrow(vtmp1, B, vtmp1, size, vtmp2);
}
if (UseSVE > 1 && VM_Version::supports_svebitperm()) {
// Given a vector with the value 0x00 or 0x01 in each byte, the basic idea
// is to compress each significant bit of the byte in a cross-lane way. Due
// to the lack of a cross-lane bit-compress instruction, we use BEXT
// (bit-compress in each lane) with the biggest lane size (T = D) then
// concatenate the results.
// The second source input of BEXT, initialized with 0x01 in each byte.
// vtmp2 = 0x01010101 0x01010101 0x01010101 0x01010101
sve_dup(vtmp2, B, 1);
// BEXT vtmp1.D, vtmp1.D, vtmp2.D
// vtmp1 = 0x0001010000010001 | 0x0100000001010001
// vtmp2 = 0x0101010101010101 | 0x0101010101010101
// ---------------------------------------
// vtmp1 = 0x0000000000000065 | 0x000000000000008D
sve_bext(vtmp1, D, vtmp1, vtmp2);
// Concatenate the least significant 8 bits in each 8 bytes, and extract the
// result to dst.
// vtmp1 = 0x0000000000000000 | 0x000000000000658D
// dst = 0x658D
if (lane_cnt <= 8) {
// No need to concatenate.
umov(dst, vtmp1, B, 0);
} else if (lane_cnt <= 16) {
ins(vtmp1, B, vtmp1, 1, 8);
umov(dst, vtmp1, H, 0);
} else {
// As the lane count is 64 at most, the final expected value must be in
// the lowest 64 bits after narrowing vtmp1 from D to B.
sve_vector_narrow(vtmp1, B, vtmp1, D, vtmp2);
umov(dst, vtmp1, D, 0);
}
} else if (UseSVE > 0) {
// Compress the lowest 8 bytes.
fmovd(dst, vtmp1);
bytemask_compress(dst);
if (lane_cnt <= 8) return;
// Repeat on higher bytes and join the results.
// Compress 8 bytes in each iteration.
for (int idx = 1; idx < (lane_cnt / 8); idx++) {
sve_extract_integral(rscratch1, T_LONG, vtmp1, idx, vtmp2);
bytemask_compress(rscratch1);
orr(dst, dst, rscratch1, Assembler::LSL, idx << 3);
}
} else {
assert(false, "unsupported");
ShouldNotReachHere();
// Repeat on higher bytes and join the results.
// Compress 8 bytes in each iteration.
for (int idx = 1; idx < (lane_cnt / 8); idx++) {
sve_extract_integral(rscratch1, T_LONG, src, idx, vtmp);
bytemask_compress(rscratch1);
orr(dst, dst, rscratch1, Assembler::LSL, idx << 3);
}
}
// Unpack the mask, a long value in src, into predicate register dst based on the
// corresponding data type. Note that dst can support at most 64 lanes.
// Below example gives the expected dst predicate register in different types, with
// a valid src(0x658D) on a 1024-bit vector size machine.
// BYTE: dst = 0x00 00 00 00 00 00 00 00 00 00 00 00 00 00 65 8D
// SHORT: dst = 0x00 00 00 00 00 00 00 00 00 00 00 00 14 11 40 51
// INT: dst = 0x00 00 00 00 00 00 00 00 01 10 01 01 10 00 11 01
// LONG: dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
//
// The number of significant bits of src must be equal to lane_cnt. E.g., 0xFF658D which
// has 24 significant bits would be an invalid input if dst predicate register refers to
// a LONG type 1024-bit vector, which has at most 16 lanes.
void C2_MacroAssembler::sve_vmask_fromlong(PRegister dst, Register src, BasicType bt, int lane_cnt,
FloatRegister vtmp1, FloatRegister vtmp2) {
assert(UseSVE == 2 && VM_Version::supports_svebitperm() &&
lane_cnt <= 64 && is_power_of_2(lane_cnt), "unsupported");
Assembler::SIMD_RegVariant size = elemType_to_regVariant(bt);
// Example: src = 0x658D, bt = T_BYTE, size = B, lane_cnt = 16
// Expected: dst = 0b01100101 10001101
// The function is same as above "sve_vmask_tolong", but it uses SVE2's BEXT
// instruction which requires the FEAT_BITPERM feature.
void C2_MacroAssembler::sve2_vmask_tolong(Register dst, FloatRegister src,
FloatRegister vtmp1, FloatRegister vtmp2,
int lane_cnt) {
assert(lane_cnt <= 64 && is_power_of_2(lane_cnt), "Unsupported lane count");
assert_different_registers(src, vtmp1, vtmp2);
assert(UseSVE > 1 && VM_Version::supports_svebitperm(), "must be");
// Put long value from general purpose register into the first lane of vector.
// vtmp1 = 0x0000000000000000 | 0x000000000000658D
sve_dup(vtmp1, B, 0);
mov(vtmp1, D, 0, src);
// Given a vector with the value 0x00 or 0x01 in each byte, the basic idea
// is to compress each significant bit of the byte in a cross-lane way. Due
// to the lack of a cross-lane bit-compress instruction, we use BEXT
// (bit-compress in each lane) with the biggest lane size (T = D) then
// concatenate the results.
// As sve_cmp generates the mask value with a minimum unit of one byte, we
// must transform the value in the first lane, which is currently a bit mask,
// into a byte mask; this can be done with SVE2's BDEP instruction.
// The first source input of the BDEP instruction. Deposit each mask byte into its own 8-byte lane.
// vtmp1 = 0x0000000000000065 | 0x000000000000008D
if (lane_cnt <= 8) {
// Nothing to do, as only one byte exists.
} else if (lane_cnt <= 16) {
ins(vtmp1, B, vtmp1, 8, 1);
mov(vtmp1, B, 1, zr);
} else {
sve_vector_extend(vtmp1, D, vtmp1, B);
}
// The second source input of BDEP instruction, initialized with 0x01 for each byte.
// The second source input of BEXT, initialized with 0x01 in each byte.
// vtmp2 = 0x01010101 0x01010101 0x01010101 0x01010101
sve_dup(vtmp2, B, 1);
// BDEP vtmp1.D, vtmp1.D, vtmp2.D
// vtmp1 = 0x0000000000000065 | 0x000000000000008D
// BEXT vtmp1.D, src.D, vtmp2.D
// src = 0x0001010000010001 | 0x0100000001010001
// vtmp2 = 0x0101010101010101 | 0x0101010101010101
// ---------------------------------------
// vtmp1 = 0x0001010000010001 | 0x0100000001010001
sve_bdep(vtmp1, D, vtmp1, vtmp2);
// vtmp1 = 0x0000000000000065 | 0x000000000000008D
sve_bext(vtmp1, D, src, vtmp2);
if (bt != T_BYTE) {
sve_vector_extend(vtmp1, size, vtmp1, B);
// Concatenate the least significant 8 bits in each 8 bytes, and extract the
// result to dst.
// vtmp1 = 0x0000000000000000 | 0x000000000000658D
// dst = 0x658D
if (lane_cnt <= 8) {
// No need to concatenate.
umov(dst, vtmp1, B, 0);
} else if (lane_cnt <= 16) {
ins(vtmp1, B, vtmp1, 1, 8);
umov(dst, vtmp1, H, 0);
} else {
// As the lane count is 64 at most, the final expected value must be in
// the lowest 64 bits after narrowing vtmp1 from D to B.
sve_vector_narrow(vtmp1, B, vtmp1, D, vtmp2);
umov(dst, vtmp1, D, 0);
}
// Generate mask according to the given vector, in which the elements have been
// extended to expected type.
// dst = 0b01100101 10001101
sve_cmp(Assembler::NE, dst, size, ptrue, vtmp1, 0);
}
// Unpack the mask, a long value in "src", into a vector register of boolean
// represented as bytes with 0x00/0x01 as element values in "dst". Each bit in
// "src" is unpacked into one byte lane in "dst". Note that "dst" can support at
// most 64 lanes.
//
// Below example gives the expected dst vector register, with a valid src(0x658D)
// on a 128-bit vector size machine.
// dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
void C2_MacroAssembler::sve_vmask_fromlong(FloatRegister dst, Register src,
FloatRegister vtmp, int lane_cnt) {
assert_different_registers(dst, vtmp);
assert(UseSVE == 2 && VM_Version::supports_svebitperm() &&
lane_cnt <= 64 && is_power_of_2(lane_cnt), "unsupported");
// Example: src = 0x658D, lane_cnt = 16
// Expected: dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
// Put long value from general purpose register into the first lane of vector.
// vtmp = 0x0000000000000000 | 0x000000000000658D
sve_dup(vtmp, B, 0);
mov(vtmp, D, 0, src);
// Transform the value in the first lane, which is currently a bit mask, into a
// byte mask, which can be done by SVE2's BDEP instruction.
// The first source input of the BDEP instruction: deposit each byte into the
// low byte of every 8-byte group.
// vtmp = 0x0000000000000065 | 0x000000000000008D
if (lane_cnt <= 8) {
// Nothing to do, as only one byte exists.
} else if (lane_cnt <= 16) {
ins(vtmp, B, vtmp, 8, 1);
} else {
sve_vector_extend(vtmp, D, vtmp, B);
}
// The second source input of BDEP instruction, initialized with 0x01 for each byte.
// dst = 0x01010101 0x01010101 0x01010101 0x01010101
sve_dup(dst, B, 1);
// BDEP dst.D, vtmp.D, dst.D
// vtmp = 0x0000000000000065 | 0x000000000000008D
// dst = 0x0101010101010101 | 0x0101010101010101
// ---------------------------------------
// dst = 0x0001010000010001 | 0x0100000001010001
sve_bdep(dst, D, vtmp, dst);
}
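The inverse direction admits the same kind of scalar reference. The sketch
below (again, the name is illustrative) expands bit i of src into byte lane i,
reproducing the dst layout shown for src = 0x658D.

#include <cstdint>
#include <cstddef>

// Scalar reference: expand bit i of the mask into one 0x00/0x01 byte lane.
void vmask_fromlong_ref(uint8_t* lanes, uint64_t src, size_t lane_cnt) {
  for (size_t i = 0; i < lane_cnt; i++) {
    lanes[i] = (uint8_t)((src >> i) & 1);
  }
}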
// Clobbers: rflags


@ -85,15 +85,19 @@
// the higher garbage bits.
void bytemask_compress(Register dst);
// Pack the lowest-numbered bit of each mask element in src into a long value
// in dst, at most the first 64 lane elements.
void sve_vmask_tolong(Register dst, PRegister src, BasicType bt, int lane_cnt,
FloatRegister vtmp1, FloatRegister vtmp2);
// Pack the value of each mask element in "src" into a long value in "dst", at most the
// first 64 lane elements. The input "src" is a vector of booleans represented as bytes
// with 0x00/0x01 as element values. Each lane value from "src" is packed into one bit in
// "dst".
void sve_vmask_tolong(Register dst, FloatRegister src, FloatRegister vtmp, int lane_cnt);
// Unpack the mask, a long value in src, into predicate register dst based on the
// corresponding data type. Note that dst can support at most 64 lanes.
void sve_vmask_fromlong(PRegister dst, Register src, BasicType bt, int lane_cnt,
FloatRegister vtmp1, FloatRegister vtmp2);
void sve2_vmask_tolong(Register dst, FloatRegister src, FloatRegister vtmp1,
FloatRegister vtmp2, int lane_cnt);
// Unpack the mask, a long value in "src", into vector register "dst" with boolean type.
// Each bit in "src" is unpacked into one byte lane in "dst". Note that "dst" can support
// at most 64 lanes.
void sve_vmask_fromlong(FloatRegister dst, Register src, FloatRegister vtmp, int lane_cnt);
// SIMD&FP comparison
void neon_compare(FloatRegister dst, BasicType bt, FloatRegister src1,


@ -1,161 +0,0 @@
dnl Copyright (c) 2016, 2021, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 cas.m4 to generate the CAE and wCAS
dnl instructions used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from cas.m4
dnl Return Arg1 with two spaces before it. We need this because m4
dnl strips leading spaces from macro args.
define(`INDENT', ` $1')dnl
dnl
define(`CAS_INSN',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchange$1$6(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN4',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchange$1$7(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($7,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
ifelse($7,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg$5`'ifelse($7,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($7,Acq,true,false), /*release*/ true,
/*weak*/ false, $res$$Register);
__ $6($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}')dnl
CAS_INSN4(B,I,byte,byte,b,sxtbw)
CAS_INSN4(S,I,short,halfword,s,sxthw)
CAS_INSN(I,I,int,word,w)
CAS_INSN(L,L,long,xword)
CAS_INSN(N,N,narrow oop,word,w)
CAS_INSN(P,P,ptr,xword)
dnl
CAS_INSN4(B,I,byte,byte,b,sxtbw,Acq)
CAS_INSN4(S,I,short,halfword,s,sxthw,Acq)
CAS_INSN(I,I,int,word,w,Acq)
CAS_INSN(L,L,long,xword,,Acq)
CAS_INSN(N,N,narrow oop,word,w,Acq)
CAS_INSN(P,P,ptr,xword,,Acq)
dnl
define(`CAS_INSN2',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),`dnl')
match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN3',
`
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
`dnl')
match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
ifelse($6,Acq,'ins_cost(VOLATILE_REF_COST);`,'ins_cost(2 * VOLATILE_REF_COST);`)
effect(KILL cr);
format %{
"cmpxchg$5`'ifelse($6,Acq,_acq,) $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}')dnl
CAS_INSN2(B,I,byte,byte,b)
CAS_INSN2(S,I,short,halfword,s)
CAS_INSN3(I,I,int,word,w)
CAS_INSN3(L,L,long,xword)
CAS_INSN3(N,N,narrow oop,word,w)
CAS_INSN3(P,P,ptr,xword)
CAS_INSN2(B,I,byte,byte,b,Acq)
CAS_INSN2(S,I,short,halfword,s,Acq)
CAS_INSN3(I,I,int,word,w,Acq)
CAS_INSN3(L,L,long,xword,,Acq)
CAS_INSN3(N,N,narrow oop,word,w,Acq)
CAS_INSN3(P,P,ptr,xword,,Acq)
dnl
// END This section of the file is automatically generated. Do not edit --------------


@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
OrderAccess::fence();
ICache::invalidate_word((address)patch_addr);
}


@ -394,12 +394,6 @@ void NativePostCallNop::make_deopt() {
NativeDeoptInstruction::insert(addr_at(0));
}
#ifdef ASSERT
static bool is_movk_to_zr(uint32_t insn) {
return ((insn & 0xffe0001f) == 0xf280001f);
}
#endif
bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
if (((oopmap_slot & 0xff) != oopmap_slot) || ((cb_offset & 0xffffff) != cb_offset)) {
return false; // cannot encode


@ -526,14 +526,31 @@ inline NativeLdSt* NativeLdSt_at(address addr) {
// can store an offset from the initial nop to the nmethod.
class NativePostCallNop: public NativeInstruction {
private:
static bool is_movk_to_zr(uint32_t insn) {
return ((insn & 0xffe0001f) == 0xf280001f);
}
public:
enum AArch64_specific_constants {
// The two parts should be checked separately to prevent an out-of-bounds access in
// case the return address points to the deopt handler stub code entry point, which
// could be at the end of a page.
first_check_size = instruction_size
};
bool check() const {
uint64_t insns = *(uint64_t*)addr_at(0);
// Check for two instructions: nop; movk zr, xx
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// a MOVK as well.
return (insns & 0xffe0001fffffffff) == 0xf280001fd503201f;
// Check the first instruction is NOP.
if (is_nop()) {
uint32_t insn = *(uint32_t*)addr_at(first_check_size);
// Check next instruction is MOVK zr, xx.
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// a MOVK as well.
return is_movk_to_zr(insn);
}
return false;
}
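As a standalone illustration of why the check is split, the probe below (a
sketch, not the HotSpot code) reads one 32-bit word at a time, so the second
word is only touched after the first word, which is always readable, has
matched. 0xd503201f is the AArch64 NOP encoding, and the masked compare
accepts MOVK xzr, #imm16 with a zero shift, since Rd == 31 selects zr.

#include <cstdint>

bool looks_like_post_call_nop(const uint32_t* pc) {
  if (pc[0] != 0xd503201f) {                  // stage 1: NOP, always readable
    return false;
  }
  return (pc[1] & 0xffe0001f) == 0xf280001f;  // stage 2: MOVK zr, #imm16
}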
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {


@ -85,7 +85,7 @@ void Relocation::pd_set_call_destination(address x) {
} else {
MacroAssembler::pd_patch_instruction(addr(), x);
}
assert(pd_call_destination(addr()) == x, "fail in reloc");
guarantee(pd_call_destination(addr()) == x, "fail in reloc");
}
void trampoline_stub_Relocation::pd_fix_owner_after_move() {


@ -260,8 +260,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state


@ -2879,7 +2879,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_encryptBlock() {
__ align(CodeEntryAlignment);
@ -2912,7 +2912,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
//
address generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES cryptographic extension support");
@ -2946,7 +2946,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -3051,7 +3051,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -3178,7 +3178,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - counter vector byte array address
// c_rarg4 - input length
// c_rarg5 - saved encryptedCounter start


@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r10, Address(rmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ lea(rscratch2, unsatisfied);
__ ldr(rscratch2, rscratch2);
__ cmp(r10, rscratch2);
__ br(Assembler::NE, L);
__ call_VM(noreg,


@ -378,8 +378,8 @@ void VM_Version::initialize() {
if (UseSHA && VM_Version::supports_sha3()) {
// Auto-enable UseSHA3Intrinsics on hardware with performance benefit.
// Note that the evaluation of UseSHA3Intrinsics shows better performance
// on Apple silicon but worse performance on Neoverse V1 and N2.
if (_cpu == CPU_APPLE) { // Apple silicon
// on Apple and Qualcomm silicon but worse performance on Neoverse V1 and N2.
if (_cpu == CPU_APPLE || _cpu == CPU_QUALCOMM) { // Apple or Qualcomm silicon
if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
}


@ -106,7 +106,7 @@ public:
CPU_MOTOROLA = 'M',
CPU_NVIDIA = 'N',
CPU_AMCC = 'P',
CPU_QUALCOM = 'Q',
CPU_QUALCOMM = 'Q',
CPU_MARVELL = 'V',
CPU_INTEL = 'i',
CPU_APPLE = 'a',


@ -105,14 +105,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return ( 3 * 4 );
}
static uint size_deopt_handler() {
return ( 9 * 4 );
}
@ -876,26 +870,6 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
// OK to trash LR, because exception blob will kill it
__ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything can be live.
@ -906,19 +880,28 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
address deopt_pc = __ pc();
__ sub(SP, SP, wordSize); // make room for saved PC
__ push(LR); // save LR that may be live when we get here
__ mov_relative_address(LR, deopt_pc);
__ str(LR, Address(SP, wordSize)); // save deopt PC
__ pop(LR); // restore LR
Label start;
__ bind(start);
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
int entry_offset = __ offset();
address deopt_pc = __ pc();
// Preserve R0 and reserve space for the address of the entry point
__ push(RegisterSet(R0) | RegisterSet(R1));
// Store the entry point address
__ mov_relative_address(R0, deopt_pc);
__ str(R0, Address(SP, wordSize));
__ pop(R0); // restore R0
__ b(start);
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
bool Matcher::match_rule_supported(int opcode) {
@ -1003,6 +986,10 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
return false;
}
bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
return false;
}
const RegMask* Matcher::predicate_reg_mask(void) {
return nullptr;
}
@ -1076,6 +1063,10 @@ bool Matcher::is_reg2reg_move(MachNode* m) {
return false;
}
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;


@ -62,22 +62,22 @@ register %{
// Integer/Long Registers
// ----------------------------
reg_def R_R0 (SOC, SOC, Op_RegI, 0, R(0)->as_VMReg());
reg_def R_R1 (SOC, SOC, Op_RegI, 1, R(1)->as_VMReg());
reg_def R_R2 (SOC, SOC, Op_RegI, 2, R(2)->as_VMReg());
reg_def R_R3 (SOC, SOC, Op_RegI, 3, R(3)->as_VMReg());
reg_def R_R4 (SOC, SOE, Op_RegI, 4, R(4)->as_VMReg());
reg_def R_R5 (SOC, SOE, Op_RegI, 5, R(5)->as_VMReg());
reg_def R_R6 (SOC, SOE, Op_RegI, 6, R(6)->as_VMReg());
reg_def R_R7 (SOC, SOE, Op_RegI, 7, R(7)->as_VMReg());
reg_def R_R8 (SOC, SOE, Op_RegI, 8, R(8)->as_VMReg());
reg_def R_R9 (SOC, SOE, Op_RegI, 9, R(9)->as_VMReg());
reg_def R_R10(NS, SOE, Op_RegI, 10, R(10)->as_VMReg());
reg_def R_R11(NS, SOE, Op_RegI, 11, R(11)->as_VMReg());
reg_def R_R12(SOC, SOC, Op_RegI, 12, R(12)->as_VMReg());
reg_def R_R13(NS, NS, Op_RegI, 13, R(13)->as_VMReg());
reg_def R_R14(SOC, SOC, Op_RegI, 14, R(14)->as_VMReg());
reg_def R_R15(NS, NS, Op_RegI, 15, R(15)->as_VMReg());
reg_def R_R0 (SOC, SOC, Op_RegI, 0, as_Register(0)->as_VMReg());
reg_def R_R1 (SOC, SOC, Op_RegI, 1, as_Register(1)->as_VMReg());
reg_def R_R2 (SOC, SOC, Op_RegI, 2, as_Register(2)->as_VMReg());
reg_def R_R3 (SOC, SOC, Op_RegI, 3, as_Register(3)->as_VMReg());
reg_def R_R4 (SOC, SOE, Op_RegI, 4, as_Register(4)->as_VMReg());
reg_def R_R5 (SOC, SOE, Op_RegI, 5, as_Register(5)->as_VMReg());
reg_def R_R6 (SOC, SOE, Op_RegI, 6, as_Register(6)->as_VMReg());
reg_def R_R7 (SOC, SOE, Op_RegI, 7, as_Register(7)->as_VMReg());
reg_def R_R8 (SOC, SOE, Op_RegI, 8, as_Register(8)->as_VMReg());
reg_def R_R9 (SOC, SOE, Op_RegI, 9, as_Register(9)->as_VMReg());
reg_def R_R10(NS, SOE, Op_RegI, 10, as_Register(10)->as_VMReg());
reg_def R_R11(NS, SOE, Op_RegI, 11, as_Register(11)->as_VMReg());
reg_def R_R12(SOC, SOC, Op_RegI, 12, as_Register(12)->as_VMReg());
reg_def R_R13(NS, NS, Op_RegI, 13, as_Register(13)->as_VMReg());
reg_def R_R14(SOC, SOC, Op_RegI, 14, as_Register(14)->as_VMReg());
reg_def R_R15(NS, NS, Op_RegI, 15, as_Register(15)->as_VMReg());
// ----------------------------
// Float/Double Registers


@ -114,7 +114,7 @@ class RegisterSet {
}
RegisterSet(Register first, Register last) {
assert(first < last, "encoding constraint");
assert(first->encoding() < last->encoding(), "encoding constraint");
_encoding = (1 << (last->encoding() + 1)) - (1 << first->encoding());
}
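The encoding expression is the usual contiguous-bit-range trick:
(1 << (last + 1)) - (1 << first) sets exactly bits first..last. A minimal
mirror with plain integers (the register encodings here are assumptions):

constexpr int range_mask(int first, int last) {
  return (1 << (last + 1)) - (1 << first);
}
static_assert(range_mask(4, 7)  == 0xF0,   "R4..R7 -> bits 4..7");
static_assert(range_mask(0, 15) == 0xFFFF, "R0..R15 -> all 16 bits");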


@ -181,7 +181,7 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
const Register lock_reg = _lock_reg->as_pointer_register();
ce->verify_reserved_argument_area_size(2);
if (obj_reg < lock_reg) {
if (obj_reg->encoding() < lock_reg->encoding()) {
__ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
} else {
__ str(obj_reg, Address(SP));


@ -272,14 +272,22 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
__ mov_relative_address(LR, __ pc());
__ push(LR); // stub expects LR to be saved
Label start;
__ bind(start);
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
int entry_offset = __ offset();
__ mov_relative_address(LR, __ pc());
__ push(LR); // stub expects LR to be saved
__ b(start);
assert(code_offset() - offset <= deopt_handler_size(), "overflow");
assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
@ -2631,11 +2639,11 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
const Register src_hi = src->as_register_hi();
assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
if (src_lo < src_hi) {
if (src_lo->encoding() < src_hi->encoding()) {
null_check_offset = __ offset();
__ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
} else {
assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
assert(src_lo->encoding() < Rtemp->encoding(), "Rtemp is higher than any allocatable register");
__ mov(Rtemp, src_hi);
null_check_offset = __ offset();
__ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
@ -2648,10 +2656,10 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
null_check_offset = __ offset();
if (dest_lo < dest_hi) {
if (dest_lo->encoding() < dest_hi->encoding()) {
__ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
} else {
assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
assert(dest_lo->encoding() < Rtemp->encoding(), "Rtemp is higher than any allocatable register");
__ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
__ mov(dest_hi, Rtemp);
}


@ -54,7 +54,7 @@
enum {
_call_stub_size = 16,
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
_deopt_handler_size = 16
_deopt_handler_size = 20
};
public:


@ -409,7 +409,7 @@ void InterpreterMacroAssembler::pop_i(Register r) {
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
assert_different_registers(lo, hi);
assert(lo < hi, "lo must be < hi");
assert(lo->encoding() < hi->encoding(), "lo must be < hi");
pop(RegisterSet(lo) | RegisterSet(hi));
}
@ -459,7 +459,7 @@ void InterpreterMacroAssembler::push_i(Register r) {
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
assert_different_registers(lo, hi);
assert(lo < hi, "lo must be < hi");
assert(lo->encoding() < hi->encoding(), "lo must be < hi");
push(RegisterSet(lo) | RegisterSet(hi));
}


@ -430,6 +430,13 @@ inline NativeCall* nativeCall_before(address return_address) {
class NativePostCallNop: public NativeInstruction {
public:
enum arm_specific_constants {
// If the check is adjusted to read beyond the size of the instruction sequence at the
// deopt handler stub code entry point, it has to happen in two stages, to prevent an
// out-of-bounds access in case the return address points to the entry point, which
// could be at the end of a page.
first_check_size = instruction_size
};
bool check() const { return is_nop(); }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const { return false; }
bool patch(int32_t oopmap_slot, int32_t cb_offset) { return false; }


@ -25,12 +25,19 @@
#include "register_arm.hpp"
#include "utilities/debug.hpp"
const int ConcreteRegisterImpl::max_gpr = ConcreteRegisterImpl::num_gpr;
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::num_fpr +
ConcreteRegisterImpl::max_gpr;
Register::RegisterImpl all_RegisterImpls [Register::number_of_registers + 1];
FloatRegister::FloatRegisterImpl all_FloatRegisterImpls [FloatRegister::number_of_registers + 1];
VFPSystemRegister::VFPSystemRegisterImpl all_VFPSystemRegisterImpls [VFPSystemRegister::number_of_registers + 1] {
{ -1 }, //vfpsnoreg
{ VFPSystemRegister::FPSID },
{ VFPSystemRegister::FPSCR },
{ VFPSystemRegister::MVFR0 },
{ VFPSystemRegister::MVFR1 }
};
const char* RegisterImpl::name() const {
const char* names[number_of_registers] = {
const char* Register::RegisterImpl::name() const {
static const char* names[number_of_registers + 1] = {
"noreg",
"r0", "r1", "r2", "r3", "r4", "r5", "r6",
#if (FP_REG_NUM == 7)
"fp",
@ -45,13 +52,14 @@ const char* RegisterImpl::name() const {
#endif
"r12", "sp", "lr", "pc"
};
return is_valid() ? names[encoding()] : "noreg";
return names[encoding() + 1];
}
const char* FloatRegisterImpl::name() const {
const char* names[number_of_registers] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
const char* FloatRegister::FloatRegisterImpl::name() const {
static const char* names[number_of_registers + 1] = {
"fnoreg",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"
#ifdef COMPILER2
@ -61,5 +69,5 @@ const char* FloatRegisterImpl::name() const {
"s56", "s57?","s58", "s59?","s60", "s61?","s62", "s63?"
#endif
};
return is_valid() ? names[encoding()] : "fnoreg";
return names[encoding() + 1];
}


@ -31,26 +31,6 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
// These are declared in ucontext.h
#undef R0
#undef R1
#undef R2
#undef R3
#undef R4
#undef R5
#undef R6
#undef R7
#undef R8
#undef R9
#undef R10
#undef R11
#undef R12
#undef R13
#undef R14
#undef R15
#define R(r) ((Register)(r))
/////////////////////////////////
// Support for different ARM ABIs
// Note: default ABI is for linux
@ -94,25 +74,86 @@ typedef VMRegImpl* VMReg;
#define ALIGN_WIDE_ARGUMENTS 1
#endif
#define R0 ((Register)0)
#define R1 ((Register)1)
#define R2 ((Register)2)
#define R3 ((Register)3)
#define R4 ((Register)4)
#define R5 ((Register)5)
#define R6 ((Register)6)
#define R7 ((Register)7)
#define R8 ((Register)8)
#define R9 ((Register)9)
#define R10 ((Register)10)
#define R11 ((Register)11)
#define R12 ((Register)12)
#define R13 ((Register)13)
#define R14 ((Register)14)
#define R15 ((Register)15)
class Register {
private:
int _encoding;
constexpr explicit Register(int encoding) : _encoding(encoding) {}
public:
enum {
number_of_registers = 16,
max_slots_per_register = 1
};
class RegisterImpl : public AbstractRegisterImpl {
friend class Register;
static constexpr const RegisterImpl* first();
public:
// accessors and testers
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
inline Register successor() const;
VMReg as_VMReg() const;
const char* name() const;
};
#define FP ((Register)FP_REG_NUM)
inline friend constexpr Register as_Register(int encoding);
constexpr Register() : _encoding(-1) {} //noreg
int operator==(const Register r) const { return _encoding == r._encoding; }
int operator!=(const Register r) const { return _encoding != r._encoding; }
const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
};
extern Register::RegisterImpl all_RegisterImpls[Register::number_of_registers + 1] INTERNAL_VISIBILITY;
inline constexpr const Register::RegisterImpl* Register::RegisterImpl::first() {
return all_RegisterImpls + 1;
}
constexpr Register noreg = Register();
inline constexpr Register as_Register(int encoding) {
if (0 <= encoding && encoding < Register::number_of_registers) {
return Register(encoding);
}
return noreg;
}
inline Register Register::RegisterImpl::successor() const {
assert(is_valid(), "sainty");
return as_Register(encoding() + 1);
}
constexpr Register R0 = as_Register( 0);
constexpr Register R1 = as_Register( 1);
constexpr Register R2 = as_Register( 2);
constexpr Register R3 = as_Register( 3);
constexpr Register R4 = as_Register( 4);
constexpr Register R5 = as_Register( 5);
constexpr Register R6 = as_Register( 6);
constexpr Register R7 = as_Register( 7);
constexpr Register R8 = as_Register( 8);
constexpr Register R9 = as_Register( 9);
constexpr Register R10 = as_Register(10);
constexpr Register R11 = as_Register(11);
constexpr Register R12 = as_Register(12);
constexpr Register R13 = as_Register(13);
constexpr Register R14 = as_Register(14);
constexpr Register R15 = as_Register(15);
constexpr Register FP = as_Register(FP_REG_NUM);
// Safe use of registers which may be FP on some platforms.
//
@ -122,185 +163,170 @@ typedef VMRegImpl* VMReg;
// as FP on supported ABIs (and replace R# by altFP_#_11). altFP_#_11
// must be #define to R11 if and only if # is FP_REG_NUM.
#if (FP_REG_NUM == 7)
#define altFP_7_11 ((Register)11)
constexpr Register altFP_7_11 = R11;
#else
#define altFP_7_11 ((Register)7)
constexpr Register altFP_7_11 = R7;
#endif
#define SP R13
#define LR R14
#define PC R15
constexpr Register SP = R13;
constexpr Register LR = R14;
constexpr Register PC = R15;
class RegisterImpl;
typedef RegisterImpl* Register;
class FloatRegister {
private:
int _encoding;
inline Register as_Register(int encoding) {
return (Register)(intptr_t)encoding;
}
constexpr explicit FloatRegister(int encoding) : _encoding(encoding) {}
class RegisterImpl : public AbstractRegisterImpl {
public:
enum {
number_of_registers = 16
number_of_registers = NOT_COMPILER2(32) COMPILER2_PRESENT(64),
max_slots_per_register = 1
};
Register successor() const { return as_Register(encoding() + 1); }
class FloatRegisterImpl : public AbstractRegisterImpl {
friend class FloatRegister;
inline friend Register as_Register(int encoding);
static constexpr const FloatRegisterImpl* first();
VMReg as_VMReg();
public:
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
const char* name() const;
// accessors and testers
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
inline FloatRegister successor() const;
// testers
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
VMReg as_VMReg() const;
int hi_bits() const {
return (encoding() >> 1) & 0xf;
}
int lo_bit() const {
return encoding() & 1;
}
int hi_bit() const {
return encoding() >> 5;
}
const char* name() const;
};
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
constexpr FloatRegister() : _encoding(-1) {} // fnoreg
int operator==(const FloatRegister r) const { return _encoding == r._encoding; }
int operator!=(const FloatRegister r) const { return _encoding != r._encoding; }
const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
};
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;
inline FloatRegister as_FloatRegister(int encoding) {
return (FloatRegister)(intptr_t)encoding;
inline constexpr const FloatRegister::FloatRegisterImpl* FloatRegister::FloatRegisterImpl::first() {
return all_FloatRegisterImpls + 1;
}
class FloatRegisterImpl : public AbstractRegisterImpl {
public:
enum {
number_of_registers = NOT_COMPILER2(32) COMPILER2_PRESENT(64)
};
constexpr FloatRegister fnoreg = FloatRegister();
inline friend FloatRegister as_FloatRegister(int encoding);
VMReg as_VMReg();
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
bool is_valid() const { return 0 <= (intx)this && (intx)this < number_of_registers; }
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
const char* name() const;
int hi_bits() const {
return (encoding() >> 1) & 0xf;
inline constexpr FloatRegister as_FloatRegister(int encoding) {
if (0 <= encoding && encoding < FloatRegister::number_of_registers) {
return FloatRegister(encoding);
}
return fnoreg;
}
int lo_bit() const {
return encoding() & 1;
}
int hi_bit() const {
return encoding() >> 5;
}
};
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
inline FloatRegister FloatRegister::FloatRegisterImpl::successor() const {
assert(is_valid(), "sainty");
return as_FloatRegister(encoding() + 1);
}
/*
* S1-S6 are named with "_reg" suffix to avoid conflict with
* constants defined in sharedRuntimeTrig.cpp
*/
CONSTANT_REGISTER_DECLARATION(FloatRegister, S0, ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S1_reg, ( 1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S2_reg, ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S3_reg, ( 3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S4_reg, ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S5_reg, ( 5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S6_reg, ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S7, ( 7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S8, ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S9, ( 9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S10, (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S11, (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S12, (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S13, (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S14, (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S15, (15));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S16, (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S17, (17));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S18, (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S19, (19));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S20, (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S21, (21));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S22, (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S23, (23));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S24, (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S25, (25));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S26, (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S27, (27));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S28, (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S29, (29));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S30, (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S31, (31));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Stemp, (30));
constexpr FloatRegister S0 = as_FloatRegister( 0);
constexpr FloatRegister S1_reg = as_FloatRegister( 1);
constexpr FloatRegister S2_reg = as_FloatRegister( 2);
constexpr FloatRegister S3_reg = as_FloatRegister( 3);
constexpr FloatRegister S4_reg = as_FloatRegister( 4);
constexpr FloatRegister S5_reg = as_FloatRegister( 5);
constexpr FloatRegister S6_reg = as_FloatRegister( 6);
constexpr FloatRegister S7 = as_FloatRegister( 7);
constexpr FloatRegister S8 = as_FloatRegister( 8);
constexpr FloatRegister S9 = as_FloatRegister( 9);
constexpr FloatRegister S10 = as_FloatRegister(10);
constexpr FloatRegister S11 = as_FloatRegister(11);
constexpr FloatRegister S12 = as_FloatRegister(12);
constexpr FloatRegister S13 = as_FloatRegister(13);
constexpr FloatRegister S14 = as_FloatRegister(14);
constexpr FloatRegister S15 = as_FloatRegister(15);
constexpr FloatRegister S16 = as_FloatRegister(16);
constexpr FloatRegister S17 = as_FloatRegister(17);
constexpr FloatRegister S18 = as_FloatRegister(18);
constexpr FloatRegister S19 = as_FloatRegister(19);
constexpr FloatRegister S20 = as_FloatRegister(20);
constexpr FloatRegister S21 = as_FloatRegister(21);
constexpr FloatRegister S22 = as_FloatRegister(22);
constexpr FloatRegister S23 = as_FloatRegister(23);
constexpr FloatRegister S24 = as_FloatRegister(24);
constexpr FloatRegister S25 = as_FloatRegister(25);
constexpr FloatRegister S26 = as_FloatRegister(26);
constexpr FloatRegister S27 = as_FloatRegister(27);
constexpr FloatRegister S28 = as_FloatRegister(28);
constexpr FloatRegister S29 = as_FloatRegister(29);
constexpr FloatRegister S30 = as_FloatRegister(30);
constexpr FloatRegister S31 = as_FloatRegister(31);
constexpr FloatRegister Stemp = S30;
CONSTANT_REGISTER_DECLARATION(FloatRegister, D0, ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D1, ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D2, ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D3, ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D4, ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D5, ( 10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D6, ( 12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D7, ( 14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D8, ( 16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D9, ( 18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D10, ( 20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D11, ( 22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D12, ( 24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D13, ( 26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D14, ( 28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D15, (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D16, (32));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D17, (34));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D18, (36));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D19, (38));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D20, (40));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D21, (42));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D22, (44));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D23, (46));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D24, (48));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D25, (50));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D26, (52));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D27, (54));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D28, (56));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D29, (58));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D30, (60));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D31, (62));
constexpr FloatRegister D0 = as_FloatRegister( 0);
constexpr FloatRegister D1 = as_FloatRegister( 2);
constexpr FloatRegister D2 = as_FloatRegister( 4);
constexpr FloatRegister D3 = as_FloatRegister( 6);
constexpr FloatRegister D4 = as_FloatRegister( 8);
constexpr FloatRegister D5 = as_FloatRegister(10);
constexpr FloatRegister D6 = as_FloatRegister(12);
constexpr FloatRegister D7 = as_FloatRegister(14);
constexpr FloatRegister D8 = as_FloatRegister(16);
constexpr FloatRegister D9 = as_FloatRegister(18);
constexpr FloatRegister D10 = as_FloatRegister(20);
constexpr FloatRegister D11 = as_FloatRegister(22);
constexpr FloatRegister D12 = as_FloatRegister(24);
constexpr FloatRegister D13 = as_FloatRegister(26);
constexpr FloatRegister D14 = as_FloatRegister(28);
constexpr FloatRegister D15 = as_FloatRegister(30);
constexpr FloatRegister D16 = as_FloatRegister(32);
constexpr FloatRegister D17 = as_FloatRegister(34);
constexpr FloatRegister D18 = as_FloatRegister(36);
constexpr FloatRegister D19 = as_FloatRegister(38);
constexpr FloatRegister D20 = as_FloatRegister(40);
constexpr FloatRegister D21 = as_FloatRegister(42);
constexpr FloatRegister D22 = as_FloatRegister(44);
constexpr FloatRegister D23 = as_FloatRegister(46);
constexpr FloatRegister D24 = as_FloatRegister(48);
constexpr FloatRegister D25 = as_FloatRegister(50);
constexpr FloatRegister D26 = as_FloatRegister(52);
constexpr FloatRegister D27 = as_FloatRegister(54);
constexpr FloatRegister D28 = as_FloatRegister(56);
constexpr FloatRegister D29 = as_FloatRegister(58);
constexpr FloatRegister D30 = as_FloatRegister(60);
constexpr FloatRegister D31 = as_FloatRegister(62);
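The hi_bits/lo_bit/hi_bit accessors above slice this shared S/D encoding
space. The standalone mirror below shows the arithmetic on plain integers;
the reading that lo_bit carries an S register's even/odd half and hi_bit
covers encodings past 31 is our interpretation.

constexpr int hi_bits(int enc) { return (enc >> 1) & 0xf; }
constexpr int lo_bit (int enc) { return enc & 1; }
constexpr int hi_bit (int enc) { return enc >> 5; }

static_assert(hi_bits(11) == 5  && lo_bit(11) == 1 && hi_bit(11) == 0, "S11");
static_assert(hi_bits(10) == 5  && lo_bit(10) == 0 && hi_bit(10) == 0, "D5 = S10:S11");
static_assert(hi_bits(62) == 15 && lo_bit(62) == 0 && hi_bit(62) == 1, "D31");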
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
log_vmregs_per_word = LogBytesPerWord - LogBytesPerInt, // VMRegs are of 4-byte size
#ifdef COMPILER2
log_bytes_per_fpr = 2, // quad vectors
#else
log_bytes_per_fpr = 2, // double vectors
#endif
log_words_per_fpr = log_bytes_per_fpr - LogBytesPerWord,
words_per_fpr = 1 << log_words_per_fpr,
log_vmregs_per_fpr = log_bytes_per_fpr - LogBytesPerInt,
log_vmregs_per_gpr = log_vmregs_per_word,
vmregs_per_gpr = 1 << log_vmregs_per_gpr,
vmregs_per_fpr = 1 << log_vmregs_per_fpr,
max_gpr = Register::number_of_registers * Register::max_slots_per_register,
max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
num_gpr = RegisterImpl::number_of_registers << log_vmregs_per_gpr,
max_gpr0 = num_gpr,
num_fpr = FloatRegisterImpl::number_of_registers << log_vmregs_per_fpr,
max_fpr0 = max_gpr0 + num_fpr,
number_of_registers = num_gpr + num_fpr + 1+1 // APSR and FPSCR so that c2's REG_COUNT <= ConcreteRegisterImpl::number_of_registers
number_of_registers = max_fpr + 1+1 // APSR and FPSCR so that c2's REG_COUNT <= ConcreteRegisterImpl::number_of_registers
};
static const int max_gpr;
static const int max_fpr;
};
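Spelled out for a COMPILER2 build (64 single-precision encodings, one slot
per register), the new constants give the following VMReg layout; the block
below is just the arithmetic, not HotSpot code.

constexpr int gpr_slots = 16 * 1;  // Register::number_of_registers * max_slots_per_register
constexpr int fpr_slots = 64 * 1;  // FloatRegister::number_of_registers * max_slots_per_register
static_assert(gpr_slots == 16,                 "max_gpr: GPR slots are VMRegs [0, 16)");
static_assert(gpr_slots + fpr_slots == 80,     "max_fpr: FPR slots are VMRegs [16, 80)");
static_assert(gpr_slots + fpr_slots + 2 == 82, "number_of_registers, incl. APSR and FPSCR");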
typedef AbstractRegSet<Register> RegSet;
@ -328,100 +354,156 @@ inline FloatRegister AbstractRegSet<FloatRegister>::last() {
class VFPSystemRegisterImpl;
typedef VFPSystemRegisterImpl* VFPSystemRegister;
class VFPSystemRegisterImpl : public AbstractRegisterImpl {
class VFPSystemRegister {
private:
int _store_idx;
constexpr explicit VFPSystemRegister(int store_idx) : _store_idx(store_idx) {}
enum {
_FPSID_store_idx = 0,
_FPSCR_store_idx = 1,
_MVFR0_store_idx = 2,
_MVFR1_store_idx = 3
};
public:
int encoding() const { return value(); }
enum {
FPSID = 0,
FPSCR = 1,
MVFR0 = 6,
MVFR1 = 7,
number_of_registers = 4
};
class VFPSystemRegisterImpl : public AbstractRegisterImpl {
friend class VFPSystemRegister;
int _encoding;
static constexpr const VFPSystemRegisterImpl* first();
public:
constexpr VFPSystemRegisterImpl(int encoding) : _encoding(encoding) {}
int encoding() const { return _encoding; }
};
inline friend constexpr VFPSystemRegister as_VFPSystemRegister(int encoding);
constexpr VFPSystemRegister() : _store_idx(-1) {} // vfpsnoreg
int operator==(const VFPSystemRegister r) const { return _store_idx == r._store_idx; }
int operator!=(const VFPSystemRegister r) const { return _store_idx != r._store_idx; }
const VFPSystemRegisterImpl* operator->() const { return VFPSystemRegisterImpl::first() + _store_idx; }
};
#define FPSID ((VFPSystemRegister)0)
#define FPSCR ((VFPSystemRegister)1)
#define MVFR0 ((VFPSystemRegister)0x6)
#define MVFR1 ((VFPSystemRegister)0x7)
extern VFPSystemRegister::VFPSystemRegisterImpl all_VFPSystemRegisterImpls[VFPSystemRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
inline constexpr const VFPSystemRegister::VFPSystemRegisterImpl* VFPSystemRegister::VFPSystemRegisterImpl::first() {
return all_VFPSystemRegisterImpls + 1;
}
constexpr VFPSystemRegister vfpsnoreg = VFPSystemRegister();
inline constexpr VFPSystemRegister as_VFPSystemRegister(int encoding) {
switch (encoding) {
case VFPSystemRegister::FPSID: return VFPSystemRegister(VFPSystemRegister::_FPSID_store_idx);
case VFPSystemRegister::FPSCR: return VFPSystemRegister(VFPSystemRegister::_FPSCR_store_idx);
case VFPSystemRegister::MVFR0: return VFPSystemRegister(VFPSystemRegister::_MVFR0_store_idx);
case VFPSystemRegister::MVFR1: return VFPSystemRegister(VFPSystemRegister::_MVFR1_store_idx);
default: return vfpsnoreg;
}
}
constexpr VFPSystemRegister FPSID = as_VFPSystemRegister(VFPSystemRegister::FPSID);
constexpr VFPSystemRegister FPSCR = as_VFPSystemRegister(VFPSystemRegister::FPSCR);
constexpr VFPSystemRegister MVFR0 = as_VFPSystemRegister(VFPSystemRegister::MVFR0);
constexpr VFPSystemRegister MVFR1 = as_VFPSystemRegister(VFPSystemRegister::MVFR1);
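The switch above exists because the architectural encodings are sparse
(FPSID = 0, FPSCR = 1, MVFR0 = 6, MVFR1 = 7) while only four Impl slots are
stored, so each encoding maps to a dense storage index. A plain-data mirror
of that mapping (inferred from the all_VFPSystemRegisterImpls table above):

constexpr int arch_encoding[4] = { 0, 1, 6, 7 };  // dense index -> FPSID, FPSCR, MVFR0, MVFR1
static_assert(arch_encoding[2] == 6, "MVFR0 is stored at dense index 2");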
/*
* Register definitions shared across interpreter and compiler
*/
#define Rexception_obj R4
#define Rexception_pc R5
constexpr Register Rexception_obj = R4;
constexpr Register Rexception_pc = R5;
/*
* Interpreter register definitions common to C++ and template interpreters.
*/
#define Rlocals R8
#define Rmethod R9
#define Rthread R10
#define Rtemp R12
constexpr Register Rlocals = R8;
constexpr Register Rmethod = R9;
constexpr Register Rthread = R10;
constexpr Register Rtemp = R12;
// Interpreter calling conventions
#define Rparams SP
#define Rsender_sp R4
constexpr Register Rparams = SP;
constexpr Register Rsender_sp = R4;
// JSR292
// Note: R5_mh is needed only during the call setup, including adapters
// This does not seem to conflict with Rexception_pc
// In case of issues, R3 might be OK but adapters calling the runtime would have to save it
#define R5_mh R5 // MethodHandle register, used during the call setup
constexpr Register R5_mh = R5; // MethodHandle register, used during the call setup
/*
* C++ Interpreter Register Defines
*/
#define Rsave0 R4
#define Rsave1 R5
#define Rsave2 R6
#define Rstate altFP_7_11 // R7 or R11
#define Ricklass R8
constexpr Register Rsave0 = R4;
constexpr Register Rsave1 = R5;
constexpr Register Rsave2 = R6;
constexpr Register Rstate = altFP_7_11; // R7 or R11
constexpr Register Ricklass = R8;
/*
* TemplateTable Interpreter Register Usage
*/
// Temporary registers
#define R0_tmp R0
#define R1_tmp R1
#define R2_tmp R2
#define R3_tmp R3
#define R4_tmp R4
#define R5_tmp R5
#define R12_tmp R12
#define LR_tmp LR
constexpr Register R0_tmp = R0;
constexpr Register R1_tmp = R1;
constexpr Register R2_tmp = R2;
constexpr Register R3_tmp = R3;
constexpr Register R4_tmp = R4;
constexpr Register R5_tmp = R5;
constexpr Register R12_tmp = R12;
constexpr Register LR_tmp = LR;
#define S0_tmp S0
#define S1_tmp S1_reg
constexpr FloatRegister S0_tmp = S0;
constexpr FloatRegister S1_tmp = S1_reg;
#define D0_tmp D0
#define D1_tmp D1
constexpr FloatRegister D0_tmp = D0;
constexpr FloatRegister D1_tmp = D1;
// Temporary registers saved across VM calls (according to C calling conventions)
#define Rtmp_save0 R4
#define Rtmp_save1 R5
constexpr Register Rtmp_save0 = R4;
constexpr Register Rtmp_save1 = R5;
// Cached TOS value
#define R0_tos R0
constexpr Register R0_tos = R0;
#define R0_tos_lo R0
#define R1_tos_hi R1
constexpr Register R0_tos_lo = R0;
constexpr Register R1_tos_hi = R1;
#define S0_tos S0
#define D0_tos D0
constexpr FloatRegister S0_tos = S0;
constexpr FloatRegister D0_tos = D0;
// Dispatch table
#define RdispatchTable R6
constexpr Register RdispatchTable = R6;
// Bytecode pointer
#define Rbcp altFP_7_11
constexpr Register Rbcp = altFP_7_11;
// Pre-loaded next bytecode for the dispatch
#define R3_bytecode R3
constexpr Register R3_bytecode = R3;
// Conventions between bytecode templates and stubs
#define R2_ClassCastException_obj R2
#define R4_ArrayIndexOutOfBounds_index R4
constexpr Register R2_ClassCastException_obj = R2;
constexpr Register R4_ArrayIndexOutOfBounds_index = R4;
// Interpreter expression stack top
#define Rstack_top SP
constexpr Register Rstack_top = SP;
/*
* Linux 32-bit ARM C ABI Register calling conventions
@ -444,10 +526,11 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
* R14 (LR) Link register
* R15 (PC) Program Counter
*/
#define c_rarg0 R0
#define c_rarg1 R1
#define c_rarg2 R2
#define c_rarg3 R3
constexpr Register c_rarg0 = R0;
constexpr Register c_rarg1 = R1;
constexpr Register c_rarg2 = R2;
constexpr Register c_rarg3 = R3;
#define GPR_PARAMS 4
@ -455,10 +538,10 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
// Java ABI
// XXX Is this correct?
#define j_rarg0 c_rarg0
#define j_rarg1 c_rarg1
#define j_rarg2 c_rarg2
#define j_rarg3 c_rarg3
constexpr Register j_rarg0 = c_rarg0;
constexpr Register j_rarg1 = c_rarg1;
constexpr Register j_rarg2 = c_rarg2;
constexpr Register j_rarg3 = c_rarg3;
#endif // CPU_ARM_REGISTER_ARM_HPP


@ -182,8 +182,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in sparc.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state


@ -70,7 +70,7 @@ public:
enum RegisterLayout {
fpu_save_size = FloatRegisterImpl::number_of_registers,
fpu_save_size = FloatRegister::number_of_registers,
#ifndef __SOFTFP__
D0_offset = 0,
#endif
@ -139,8 +139,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm,
if (VM_Version::has_vfp3_32()) {
__ fpush(FloatRegisterSet(D16, 16));
} else {
if (FloatRegisterImpl::number_of_registers > 32) {
assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
if (FloatRegister::number_of_registers > 32) {
assert(FloatRegister::number_of_registers == 64, "nb fp registers should be 64");
__ sub(SP, SP, 32 * wordSize);
}
}
@ -182,8 +182,8 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_lr
if (VM_Version::has_vfp3_32()) {
__ fpop(FloatRegisterSet(D16, 16));
} else {
if (FloatRegisterImpl::number_of_registers > 32) {
assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
if (FloatRegister::number_of_registers > 32) {
assert(FloatRegister::number_of_registers == 64, "nb fp registers should be 64");
__ add(SP, SP, 32 * wordSize);
}
}


@ -30,14 +30,14 @@ void VMRegImpl::set_regName() {
Register reg = ::as_Register(0);
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr; reg = reg->successor()) {
for (int j = 0; j < (1 << ConcreteRegisterImpl::log_vmregs_per_gpr); j++) {
for (int j = 0; j < Register::max_slots_per_register; j++) {
regName[i++] = reg->name();
}
}
#ifndef __SOFTFP__
FloatRegister freg = ::as_FloatRegister(0);
for ( ; i < ConcreteRegisterImpl::max_fpr ; ) {
for (int j = 0; j < (1 << ConcreteRegisterImpl::log_vmregs_per_fpr); j++) {
for (int j = 0; j < Register::max_slots_per_register; j++) {
regName[i++] = freg->name();
}
freg = freg->successor();


@ -36,20 +36,20 @@
inline Register as_Register() {
assert(is_Register(), "must be");
assert(is_concrete(), "concrete register expected");
return ::as_Register(value() >> ConcreteRegisterImpl::log_vmregs_per_gpr);
return ::as_Register(value() / Register::max_slots_per_register);
}
inline FloatRegister as_FloatRegister() {
assert(is_FloatRegister(), "must be");
assert(is_concrete(), "concrete register expected");
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> ConcreteRegisterImpl::log_vmregs_per_fpr);
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) / FloatRegister::max_slots_per_register);
}
inline bool is_concrete() {
if (is_Register()) {
return ((value() & right_n_bits(ConcreteRegisterImpl::log_vmregs_per_gpr)) == 0);
return (value() % Register::max_slots_per_register == 0);
} else if (is_FloatRegister()) {
return (((value() - ConcreteRegisterImpl::max_gpr) & right_n_bits(ConcreteRegisterImpl::log_vmregs_per_fpr)) == 0);
return (value() % FloatRegister::max_slots_per_register == 0); // Single slot
} else {
return false;
}


@ -25,11 +25,11 @@
#ifndef CPU_ARM_VMREG_ARM_INLINE_HPP
#define CPU_ARM_VMREG_ARM_INLINE_HPP
inline VMReg RegisterImpl::as_VMReg() {
return VMRegImpl::as_VMReg(encoding() << ConcreteRegisterImpl::log_vmregs_per_gpr);
inline VMReg Register::RegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg(encoding() * Register::max_slots_per_register);
}
inline VMReg FloatRegisterImpl::as_VMReg() {
return VMRegImpl::as_VMReg((encoding() << ConcreteRegisterImpl::log_vmregs_per_fpr) + ConcreteRegisterImpl::max_gpr);
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding() * FloatRegister::max_slots_per_register) + ConcreteRegisterImpl::max_gpr);
}
#endif // CPU_ARM_VMREG_ARM_INLINE_HPP


@ -157,6 +157,9 @@ inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_va
return result;
}
template<>
struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
template<>
template<typename T>
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
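What XchgUsingCmpxchg<1> supplies can be pictured as a compare-and-swap retry
loop; the generic sketch below is an illustration under that assumption, not
HotSpot's actual template.

#include <atomic>
#include <cstdint>

uint8_t xchg_via_cas(std::atomic<uint8_t>& dest, uint8_t new_value) {
  uint8_t old_value = dest.load(std::memory_order_relaxed);
  // compare_exchange_weak refreshes old_value on failure; retry until it sticks.
  while (!dest.compare_exchange_weak(old_value, new_value)) {
  }
  return old_value;
}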


@ -264,12 +264,19 @@ int LIR_Assembler::emit_deopt_handler() {
}
int offset = code_offset();
Label start;
__ bind(start);
__ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
int entry_offset = __ offset();
__ b(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}


@ -63,7 +63,7 @@ enum {
_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
_call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
_exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
_deopt_handler_size = MacroAssembler::bl64_patchable_size
_deopt_handler_size = MacroAssembler::bl64_patchable_size + BytesPerInstWord
};
// '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()


@ -51,8 +51,6 @@ class NativeInstruction {
friend class Relocation;
public:
bool is_post_call_nop() const { return MacroAssembler::is_post_call_nop(long_at(0)); }
bool is_jump() const { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.
bool is_sigtrap_ic_miss_check() {
@ -531,6 +529,14 @@ class NativePostCallNop: public NativeInstruction {
};
public:
enum ppc_specific_constants {
// If the check is adjusted to read beyond the size of the instruction at the deopt handler
// stub code entry point, it has to happen in two stages, to prevent an out-of-bounds access
// in case the return address points to the entry point, which could be at the end of a page.
first_check_size = BytesPerInstWord
};
bool is_post_call_nop() const { return MacroAssembler::is_post_call_nop(long_at(0)); }
bool check() const { return is_post_call_nop(); }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
uint32_t instr_bits = long_at(0);


@ -1795,10 +1795,13 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
return size; // Self copy, no move.
if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ideal_reg()), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
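Assuming stack_alignment_in_slots() is at least 4, the bound these asserts
compute for Op_VecX works out to 16 bytes, which appears to be what lets the
Power9 paths below drop the old unaligned stxvx/lxvx fallback. A minimal
mirror of the arithmetic:

constexpr int vecx_slots = 4;  // RegMask::num_registers(Op_VecX), in 4-byte slots
constexpr int slot_bytes = 4;  // VMRegImpl::stack_slot_size
static_assert((vecx_slots < 4 ? vecx_slots : 4) * slot_bytes == 16,
              "VecX spill offsets must be 16-byte aligned");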
// Memory->Memory Spill.
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (masm) {
__ ld(R0, src_offset, R1_SP);
__ std(R0, dst_offset, R1_SP);
@ -1806,26 +1809,20 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
__ std(R0, dst_offset+8, R1_SP);
}
size += 16;
#ifndef PRODUCT
if (st != nullptr) {
st->print("%-7s [R1_SP + #%d] -> [R1_SP + #%d] \t// vector spill copy", "SPILL", src_offset, dst_offset);
}
#endif // !PRODUCT
}
// VectorRegister->Memory Spill.
else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) {
VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr();
int dst_offset = ra_->reg2offset(dst_lo);
if (PowerArchitecturePPC64 >= 9) {
if (is_aligned(dst_offset, 16)) {
if (masm) {
__ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
}
size += 4;
} else {
// Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
// observed with VectorRearrangeTest.java on Power9).
if (masm) {
__ addi(R0, R1_SP, dst_offset);
__ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
}
size += 8;
if (masm) {
__ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, dst_offset);
@ -1833,24 +1830,25 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
}
size += 8;
}
#ifndef PRODUCT
if (st != nullptr) {
if (PowerArchitecturePPC64 >= 9) {
st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "STXV", Matcher::regName[src_lo], dst_offset);
} else {
st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
"%-7s %s, [R0] \t// vector spill copy", "ADDI", dst_offset, "STXVD2X", Matcher::regName[src_lo]);
}
}
#endif // !PRODUCT
}
// Memory->VectorRegister Spill.
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) {
VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr();
int src_offset = ra_->reg2offset(src_lo);
if (PowerArchitecturePPC64 >= 9) {
if (is_aligned(src_offset, 16)) {
if (masm) {
__ lxv(Rdst, src_offset, R1_SP);
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, src_offset);
__ lxvx(Rdst, R0);
}
size += 8;
if (masm) {
__ lxv(Rdst, src_offset, R1_SP);
}
size += 4;
} else {
if (masm) {
__ addi(R0, R1_SP, src_offset);
@ -1858,6 +1856,16 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
}
size += 8;
}
#ifndef PRODUCT
if (st != nullptr) {
if (PowerArchitecturePPC64 >= 9) {
st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "LXV", Matcher::regName[dst_lo], src_offset);
} else {
st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
"%-7s %s, [R0] \t// vector spill copy", "ADDI", src_offset, "LXVD2X", Matcher::regName[dst_lo]);
}
}
#endif // !PRODUCT
}
// VectorRegister->VectorRegister.
else if (src_lo_rc == rc_vec && dst_lo_rc == rc_vec) {
@ -1867,6 +1875,12 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
__ xxlor(Rdst, Rsrc, Rsrc);
}
size += 4;
#ifndef PRODUCT
if (st != nullptr) {
st->print("%-7s %s, %s, %s\t// vector spill copy",
"XXLOR", Matcher::regName[dst_lo], Matcher::regName[src_lo], Matcher::regName[src_lo]);
}
#endif // !PRODUCT
}
else {
ShouldNotReachHere(); // No VR spill.
@ -2088,17 +2102,11 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// The exception_handler is a b64_patchable.
return MacroAssembler::b64_patchable_size;
}
static uint size_deopt_handler() {
// The deopt_handler is a bl64_patchable.
return MacroAssembler::bl64_patchable_size;
return MacroAssembler::bl64_patchable_size + BytesPerInstWord;
}
};
@ -2114,22 +2122,6 @@ public:
source %{
int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
relocInfo::runtime_call_type);
assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
__ end_a_stub();
return offset;
}
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
@ -2140,12 +2132,23 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
Label start;
__ bind(start);
__ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
relocInfo::runtime_call_type);
int entry_offset = __ offset();
__ b(start);
assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
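With this change the deopt handler stub is laid out roughly as:

  start:         bl64_patchable SharedRuntime::deopt_blob()->unpack()
  entry_offset:  b start            // returned as the handler entry point

A deoptimizing return lands on the backward branch, which re-enters the bl; the bl in turn leaves the link register pointing just past itself, i.e. exactly at entry_offset. That appears to be why the RegisterSaver's return_pc_adjustment parameter can be dropped in the sharedRuntime_ppc.cpp hunks below, and why size_deopt_handler() grows by BytesPerInstWord for the extra branch.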
//=============================================================================
@ -2292,6 +2295,10 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
return false;
}
bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
return false;
}
const RegMask* Matcher::predicate_reg_mask(void) {
return nullptr;
}
@ -2390,6 +2397,10 @@ bool Matcher::is_reg2reg_move(MachNode* m) {
return false;
}
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;

View File

@ -46,7 +46,6 @@
//------------------------------generate_exception_blob---------------------------
// Creates exception blob at the end.
// Using exception blob, this code is jumped from a compiled method.
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -83,7 +83,6 @@ class RegisterSaver {
static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors = false);
static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
@ -262,7 +261,6 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors) {
// Push an abi_reg_args-frame and store all registers which may be live.
@ -271,7 +269,6 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// propagated to the RegisterMap of the caller frame during
// StackFrameStream construction (needed for deoptimization; see
// compiledVFrame::create_stack_value).
// If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
// Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
// calculate frame size
@ -305,14 +302,11 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// Do the save_LR by hand and adjust the return pc if requested.
switch (return_pc_location) {
case return_pc_is_lr: __ mflr(R31); break;
case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
case return_pc_is_pre_saved: break;
case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
default: ShouldNotReachHere();
}
if (return_pc_location != return_pc_is_pre_saved) {
if (return_pc_adjustment != 0) {
__ addi(R31, R31, return_pc_adjustment);
}
__ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
}
@ -2907,22 +2901,15 @@ void SharedRuntime::generate_deopt_blob() {
// deopt_handler: call_deopt_stub
// cur. return pc --> ...
//
// So currently SR_LR points behind the call in the deopt handler.
// We adjust it such that it points to the start of the deopt handler.
// The return_pc has been stored in the frame of the deoptee and
// will replace the address of the deopt_handler in the call
// to Deoptimization::fetch_unroll_info below.
// We can't grab a free register here, because all registers may
// contain live values, so let the RegisterSaver do the adjustment
// of the return pc.
const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
// Push the "unpack frame"
// Save everything in sight.
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
assert(map != nullptr, "OopMap must have been created");
@ -2957,7 +2944,6 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_exception=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
// Deopt during an exception. Save exec mode for unpack_frames.
@ -2975,7 +2961,6 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_reexecute=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
__ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
#endif
@ -3266,7 +3251,6 @@ SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr)
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location, save_vectors);
// The following is basically a call_VM. However, we need the precise
@ -3367,7 +3351,6 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
// Use noreg as last_Java_pc, the return pc will be reconstructed

View File

@ -2956,7 +2956,7 @@ class StubGenerator: public StubCodeGenerator {
// Arguments for generated stub:
// R3_ARG1 - source byte array address
// R4_ARG2 - destination byte array address
// R5_ARG3 - K (key) in little endian int array
// R5_ARG3 - sessionKe (key) in little endian int array
address generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id;

View File

@ -111,6 +111,10 @@ void VM_Version::initialize() {
}
MaxVectorSize = SuperwordUseVSX ? 16 : 8;
if (!SuperwordUseVSX && FLAG_IS_DEFAULT(EnableVectorSupport)) {
// VectorSupport intrinsics currently have issues with MaxVectorSize < 16 (JDK-8370803).
FLAG_SET_ERGO(EnableVectorSupport, false);
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_ERGO(AlignVector, false);
}

View File

@ -377,12 +377,20 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
__ auipc(ra, 0);
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ j(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {

View File

@ -72,7 +72,7 @@ private:
// See emit_exception_handler for detail
_exception_handler_size = DEBUG_ONLY(256) NOT_DEBUG(32), // or smaller
// See emit_deopt_handler for detail
// auipc (1) + far_jump (2)
// far_call (2) + j (1)
_deopt_handler_size = 1 * MacroAssembler::instruction_size +
2 * MacroAssembler::instruction_size
};

View File

@ -311,12 +311,19 @@ inline bool NativeInstruction::is_jump_or_nop() {
// can store an offset from the initial nop to the nmethod.
class NativePostCallNop: public NativeInstruction {
public:
enum RISCV_specific_constants {
// The two parts should be checked separately to prevent an out-of-bounds access in
// case the return address points to the deopt handler stub code entry point, which
// could be at the end of a page.
first_check_size = instruction_size
};
bool check() const {
// Check for two instructions: nop; lui zr, hi20
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// an addiw as well.
return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(4));
return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(first_check_size));
}
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const;
bool patch(int32_t oopmap_slot, int32_t cb_offset);
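Note that the && in check() is itself the two-stage read that first_check_size describes: C++ short-circuits, so the lui word at addr_at(first_check_size) is touched only after is_nop() has matched the first, always-mapped word. At a deopt handler entry the staging plays out like this (a sketch of the failure path, given the handler layout emitted by emit_deopt_handler):

// return address == deopt handler entry, possibly the last word of a page
//   word 0: j start      -> is_nop() fails
//   && short-circuits    -> the lui slot at offset 4 is never read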

View File

@ -1049,15 +1049,10 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_branch_size();
}
static uint size_deopt_handler() {
// count auipc + far branch
// count far call + j
return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
}
};
@ -1838,25 +1833,6 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// auipc t1, #exception_blob_entry_point
// jr (offset)t1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
@ -1867,12 +1843,19 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
}
int offset = __ offset();
__ auipc(ra, 0);
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ j(start);
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
"out of bounds read in post-call NOP check");
__ end_a_stub();
return offset;
return entry_offset;
}
// REQUIRED MATCHER CODE
@ -2070,6 +2053,10 @@ bool Matcher::is_reg2reg_move(MachNode* m) {
return false;
}
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;

View File

@ -164,6 +164,11 @@ source %{
bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen) {
return false;
}
bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
// Prefer predicate if the mask type is "TypeVectMask".
return vt->isa_vectmask() != nullptr;
}
%}
// All VEC instructions

View File

@ -249,8 +249,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in riscv.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -2463,7 +2463,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_encryptBlock() {
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@ -2493,8 +2493,8 @@ class StubGenerator: public StubCodeGenerator {
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
__ vle32_v(res, from);
__ mv(t2, 52);
__ blt(keylen, t2, L_aes128);
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ bltu(keylen, t2, L_aes128);
__ beq(keylen, t2, L_aes192);
// Else we fallthrough to the biggest case (256-bit key size)
@ -2542,7 +2542,7 @@ class StubGenerator: public StubCodeGenerator {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address generate_aescrypt_decryptBlock() {
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@ -2572,8 +2572,8 @@ class StubGenerator: public StubCodeGenerator {
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
__ vle32_v(res, from);
__ mv(t2, 52);
__ blt(keylen, t2, L_aes128);
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ bltu(keylen, t2, L_aes128);
__ beq(keylen, t2, L_aes192);
// Else we fallthrough to the biggest case (256-bit key size)
@ -2606,6 +2606,223 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Load big-endian 128-bit from memory.
void be_load_counter_128(Register counter_hi, Register counter_lo, Register counter) {
__ ld(counter_lo, Address(counter, 8)); // Load 128 bits from counter
__ ld(counter_hi, Address(counter));
__ rev8(counter_lo, counter_lo); // Convert big-endian to little-endian
__ rev8(counter_hi, counter_hi);
}
// Little-endian 128-bit + 64-bit -> 128-bit addition.
void add_counter_128(Register counter_hi, Register counter_lo) {
assert_different_registers(counter_hi, counter_lo, t0);
__ addi(counter_lo, counter_lo, 1);
__ seqz(t0, counter_lo); // Check for result overflow
__ add(counter_hi, counter_hi, t0); // Add 1 if overflow otherwise 0
}
// Store big-endian 128-bit to memory.
void be_store_counter_128(Register counter_hi, Register counter_lo, Register counter) {
assert_different_registers(counter_hi, counter_lo, t0, t1);
__ rev8(t0, counter_lo); // Convert little-endian to big-endian
__ rev8(t1, counter_hi);
__ sd(t0, Address(counter, 8)); // Store 128 bits to counter
__ sd(t1, Address(counter));
}
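The three helpers above model a big-endian 128-bit counter held in two little-endian 64-bit registers; rev8 (a Zbb instruction) is a full byte swap. A scalar C++ equivalent (a sketch assuming a little-endian host like RV64; __builtin_bswap64 stands in for rev8, and memcpy avoids alignment assumptions):

#include <cstdint>
#include <cstring>

static void increment_be_counter_128(uint8_t counter[16]) {
  uint64_t hi, lo;
  std::memcpy(&hi, counter, 8);   // be_load_counter_128: high half first
  std::memcpy(&lo, counter + 8, 8);
  hi = __builtin_bswap64(hi);     // rev8: big-endian memory -> little-endian value
  lo = __builtin_bswap64(lo);
  if (++lo == 0) hi++;            // add_counter_128: seqz produces the carry
  hi = __builtin_bswap64(hi);     // rev8: back to big-endian byte order
  lo = __builtin_bswap64(lo);
  std::memcpy(counter, &hi, 8);   // be_store_counter_128
  std::memcpy(counter + 8, &lo, 8);
}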
void counterMode_AESCrypt(int round, Register in, Register out, Register key, Register counter,
Register input_len, Register saved_encrypted_ctr, Register used_ptr) {
// Algorithm:
//
// generate_aes_loadkeys();
// load_counter_128(counter_hi, counter_lo, counter);
//
// L_next:
// if (used >= BLOCK_SIZE) goto L_main_loop;
//
// L_encrypt_next:
// *out = *in ^ saved_encrypted_ctr[used];
// out++; in++; used++; len--;
// if (len == 0) goto L_exit;
// goto L_next;
//
// L_main_loop:
// if (len == 0) goto L_exit;
// saved_encrypted_ctr = generate_aes_encrypt(counter);
//
// add_counter_128(counter_hi, counter_lo);
// be_store_counter_128(counter_hi, counter_lo, counter);
// used = 0;
//
// if (len < BLOCK_SIZE) goto L_encrypt_next;
//
// v_in = load_16Byte(in);
// v_out = load_16Byte(out);
// v_saved_encrypted_ctr = load_16Byte(saved_encrypted_ctr);
// v_out = v_in ^ v_saved_encrypted_ctr;
// out += BLOCK_SIZE;
// in += BLOCK_SIZE;
// len -= BLOCK_SIZE;
// used = BLOCK_SIZE;
// goto L_main_loop;
//
//
// L_exit:
// store(used);
// result = input_len
// return result;
const Register used = x28;
const Register len = x29;
const Register counter_hi = x30;
const Register counter_lo = x31;
const Register block_size = t2;
const unsigned int BLOCK_SIZE = 16;
VectorRegister working_vregs[] = {
v1, v2, v3, v4, v5, v6, v7, v8,
v9, v10, v11, v12, v13, v14, v15
};
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
__ lwu(used, Address(used_ptr));
__ mv(len, input_len);
__ mv(block_size, BLOCK_SIZE);
// load keys to working_vregs according to round
generate_aes_loadkeys(key, working_vregs, round);
// 128-bit big-endian load
be_load_counter_128(counter_hi, counter_lo, counter);
Label L_next, L_encrypt_next, L_main_loop, L_exit;
// Check the last saved_encrypted_ctr used value; we fall through
// to L_encrypt_next when the used value is lower than block_size.
__ bind(L_next);
__ bgeu(used, block_size, L_main_loop);
// Fewer than block_size bytes may remain after L_main_loop or from the
// last used keystream; encrypt them one by one.
__ bind(L_encrypt_next);
__ add(t0, saved_encrypted_ctr, used);
__ lbu(t1, Address(t0));
__ lbu(t0, Address(in));
__ xorr(t1, t1, t0);
__ sb(t1, Address(out));
__ addi(in, in, 1);
__ addi(out, out, 1);
__ addi(used, used, 1);
__ subi(len, len, 1);
__ beqz(len, L_exit);
__ j(L_next);
// If len is not zero, calculate the next saved_encrypted_ctr and encrypt
// the data block by block until less than a full block remains.
__ bind(L_main_loop);
__ beqz(len, L_exit);
__ vle32_v(v16, counter);
// encrypt counter according to round
generate_aes_encrypt(v16, working_vregs, round);
__ vse32_v(v16, saved_encrypted_ctr);
// 128-bit little-endian increment
add_counter_128(counter_hi, counter_lo);
// 128-bit big-endian store
be_store_counter_128(counter_hi, counter_lo, counter);
__ mv(used, 0);
// Check if we have a full block_size
__ bltu(len, block_size, L_encrypt_next);
// We have at least one full block to encrypt
__ vle32_v(v17, in);
__ vxor_vv(v16, v16, v17);
__ vse32_v(v16, out);
__ add(out, out, block_size);
__ add(in, in, block_size);
__ sub(len, len, block_size);
__ mv(used, block_size);
__ j(L_main_loop);
__ bind(L_exit);
__ sw(used, Address(used_ptr));
__ mv(x10, input_len);
__ leave();
__ ret();
};
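The pseudocode at the top of counterMode_AESCrypt is the standard keystream-caching CTR loop: encrypt the counter once per 16-byte block, XOR the keystream into the data, and persist used so the next call can drain a leftover partial block first. A byte-at-a-time C++ sketch of the same contract (aes_encrypt_block is an assumed helper; the stub itself works block-wise with vector loads where it can):

#include <cstddef>
#include <cstdint>

// Assumed: one AES block encryption with the expanded key (not shown here).
void aes_encrypt_block(const uint8_t counter[16], uint8_t keystream[16]);

void ctr_crypt(const uint8_t* in, uint8_t* out, size_t len,
               uint8_t counter[16], uint8_t keystream[16], unsigned& used) {
  for (size_t i = 0; i < len; i++) {
    if (used >= 16) {                      // L_main_loop: refill the keystream
      aes_encrypt_block(counter, keystream);
      increment_be_counter_128(counter);   // see the scalar sketch above
      used = 0;
    }
    out[i] = in[i] ^ keystream[used++];    // L_encrypt_next: one byte at a time
  }
}

Returning input_len and storing used back through used_ptr, as the stub does, lets the caller's CTR state (counter, cached keystream, used) stay consistent across calls.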
// CTR AES crypt.
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg3 - counter vector byte array address
// c_rarg4 - input length
// c_rarg5 - saved encryptedCounter start
// c_rarg6 - saved used length
//
// Output:
// x10 - input length
//
address generate_counterMode_AESCrypt() {
assert(UseZvkn, "need AES instructions (Zvkned extension) support");
assert(UseAESCTRIntrinsics, "need AES instructions (Zvkned extension) support");
assert(UseZbb, "need basic bit manipulation (Zbb extension) support");
__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
const Register in = c_rarg0;
const Register out = c_rarg1;
const Register key = c_rarg2;
const Register counter = c_rarg3;
const Register input_len = c_rarg4;
const Register saved_encrypted_ctr = c_rarg5;
const Register used_len_ptr = c_rarg6;
const Register keylen = c_rarg7; // temporary register
const address start = __ pc();
__ enter();
Label L_exit;
__ beqz(input_len, L_exit);
Label L_aes128, L_aes192;
// Compute #rounds for AES based on the length of the key array
__ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ mv(t0, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
__ bltu(keylen, t0, L_aes128);
__ beq(keylen, t0, L_aes192);
// Else we fallthrough to the biggest case (256-bit key size)
// Note: the following function performs crypt with key += 15*16
counterMode_AESCrypt(15, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
// Note: the following function performs crypt with key += 13*16
__ bind(L_aes192);
counterMode_AESCrypt(13, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
// Note: the following function performs crypt with key += 11*16
__ bind(L_aes128);
counterMode_AESCrypt(11, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);
__ bind(L_exit);
__ mv(x10, input_len);
__ leave();
__ ret();
return start;
}
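The {11, 13, 15} * 4 comment follows from the AES key schedule: the expanded key holds 4 * (Nr + 1) 32-bit words, so

  AES-128 (Nr = 10): 4 * 11 = 44 ints
  AES-192 (Nr = 12): 4 * 13 = 52 ints
  AES-256 (Nr = 14): 4 * 15 = 60 ints

Hence keylen < 52 (bltu) selects L_aes128, keylen == 52 selects L_aes192, everything larger falls through to the 256-bit path, and the three counterMode_AESCrypt expansions above are instantiated with round = 11, 13 and 15 respectively.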
// code for comparing 8 characters of strings with Latin1 and Utf16 encoding
void compare_string_8_x_LU(Register tmpL, Register tmpU,
Register strL, Register strU, Label& DIFF) {
@ -6826,6 +7043,10 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
}
if (UseAESCTRIntrinsics) {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
}
if (UsePoly1305Intrinsics) {
StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks();
}

View File

@ -1146,9 +1146,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label L;
__ ld(x28, Address(xmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ la(t, unsatisfied);
__ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8
__ la(t1, unsatisfied);
__ bne(x28, t1, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,

View File

@ -434,6 +434,15 @@ void VM_Version::c2_initialize() {
warning("UseAESIntrinsics enabled, but UseAES not, enabling");
UseAES = true;
}
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics) && UseZbb) {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
}
if (UseAESCTRIntrinsics && !UseZbb) {
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZbb support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
} else {
if (UseAES) {
warning("AES instructions are not available on this CPU");
@ -443,11 +452,10 @@ void VM_Version::c2_initialize() {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
if (UseAESCTRIntrinsics) {
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZvkn support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
}
}

View File

@ -89,11 +89,12 @@ class VM_Version : public Abstract_VM_Version {
FLAG_SET_DEFAULT(flag, true); \
} else { \
FLAG_SET_DEFAULT(flag, false); \
stringStream ss; \
deps_string(ss, dep0, ##__VA_ARGS__); \
warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
stringStream ss; \
ss.print("missing dependent extension(s): "); \
deps_string(ss, dep0, ##__VA_ARGS__); \
log_disabled(ss.as_string(true)); \
} \
} else { \
/* Sync CPU features with flags */ \
@ -101,11 +102,12 @@ class VM_Version : public Abstract_VM_Version {
disable_feature(); \
} else if (!deps_all_enabled(dep0, ##__VA_ARGS__)) { \
FLAG_SET_DEFAULT(flag, false); \
stringStream ss; \
deps_string(ss, dep0, ##__VA_ARGS__); \
warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
stringStream ss; \
ss.print("missing dependent extension(s): "); \
deps_string(ss, dep0, ##__VA_ARGS__); \
log_disabled(ss.as_string(true)); \
} \
} \
} \
@ -136,6 +138,7 @@ class VM_Version : public Abstract_VM_Version {
RVExtFeatures::current()->clear_feature(_cpu_feature_index);
}
void log_enabled();
void log_disabled(const char* reason);
protected:
bool deps_all_enabled(RVExtFeatureValue* dep0, ...) {

Some files were not shown because too many files have changed in this diff