mirror of
https://github.com/openjdk/jdk.git
synced 2026-04-08 05:58:38 +00:00
Merge branch 'master' into 8044609-ssl
This commit is contained in:
commit
c44d524cee
21
.github/scripts/gen-build-failure-report.sh
vendored
21
.github/scripts/gen-build-failure-report.sh
vendored
@ -24,12 +24,19 @@
|
||||
# questions.
|
||||
#
|
||||
|
||||
# Import common utils
|
||||
. report-utils.sh
|
||||
|
||||
GITHUB_STEP_SUMMARY="$1"
|
||||
BUILD_DIR="$(ls -d build/*)"
|
||||
|
||||
# Send signal to the do-build action that we failed
|
||||
touch "$BUILD_DIR/build-failure"
|
||||
|
||||
# Collect hs_errs for build-time crashes, e.g. javac, jmod, jlink, CDS.
|
||||
# These usually land in make/
|
||||
hs_err_files=$(ls make/hs_err*.log 2> /dev/null || true)
|
||||
|
||||
(
|
||||
echo '### :boom: Build failure summary'
|
||||
echo ''
|
||||
@ -46,6 +53,20 @@ touch "$BUILD_DIR/build-failure"
|
||||
echo '</details>'
|
||||
echo ''
|
||||
|
||||
for hs_err in $hs_err_files; do
|
||||
echo "<details><summary><b>View HotSpot error log: "$hs_err"</b></summary>"
|
||||
echo ''
|
||||
echo '```'
|
||||
echo "$hs_err:"
|
||||
echo ''
|
||||
cat "$hs_err"
|
||||
echo '```'
|
||||
echo '</details>'
|
||||
echo ''
|
||||
done
|
||||
|
||||
echo ''
|
||||
echo ':arrow_right: To see the entire test log, click the job in the list to the left. To download logs, see the `failure-logs` [artifact above](#artifacts).'
|
||||
) >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
truncate_summary
|
||||
|
||||
19
.github/scripts/gen-test-results.sh
vendored
19
.github/scripts/gen-test-results.sh
vendored
@ -24,6 +24,9 @@
|
||||
# questions.
|
||||
#
|
||||
|
||||
# Import common utils
|
||||
. report-utils.sh
|
||||
|
||||
GITHUB_STEP_SUMMARY="$1"
|
||||
|
||||
test_suite_name=$(cat build/run-test-prebuilt/test-support/test-last-ids.txt)
|
||||
@ -89,18 +92,6 @@ for test in $failures $errors; do
|
||||
fi
|
||||
done >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
# With many failures, the summary can easily exceed 1024 kB, the limit set by Github
|
||||
# Trim it down if so.
|
||||
summary_size=$(wc -c < $GITHUB_STEP_SUMMARY)
|
||||
if [[ $summary_size -gt 1000000 ]]; then
|
||||
# Trim to below 1024 kB, and cut off after the last detail group
|
||||
head -c 1000000 $GITHUB_STEP_SUMMARY | tac | sed -n -e '/<\/details>/,$ p' | tac > $GITHUB_STEP_SUMMARY.tmp
|
||||
mv $GITHUB_STEP_SUMMARY.tmp $GITHUB_STEP_SUMMARY
|
||||
(
|
||||
echo ''
|
||||
echo ':x: **WARNING: Summary is too large and has been truncated.**'
|
||||
echo ''
|
||||
) >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
echo ':arrow_right: To see the entire test log, click the job in the list to the left.' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
truncate_summary
|
||||
|
||||
41
.github/scripts/report-utils.sh
vendored
Normal file
41
.github/scripts/report-utils.sh
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
function truncate_summary() {
|
||||
# With large hs_errs, the summary can easily exceed 1024 kB, the limit set by Github
|
||||
# Trim it down if so.
|
||||
summary_size=$(wc -c < $GITHUB_STEP_SUMMARY)
|
||||
if [[ $summary_size -gt 1000000 ]]; then
|
||||
# Trim to below 1024 kB, and cut off after the last detail group
|
||||
head -c 1000000 $GITHUB_STEP_SUMMARY | tac | sed -n -e '/<\/details>/,$ p' | tac > $GITHUB_STEP_SUMMARY.tmp
|
||||
mv $GITHUB_STEP_SUMMARY.tmp $GITHUB_STEP_SUMMARY
|
||||
(
|
||||
echo ''
|
||||
echo ':x: **WARNING: Summary is too large and has been truncated.**'
|
||||
echo ''
|
||||
) >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
}
|
||||
2
.github/workflows/build-cross-compile.yml
vendored
2
.github/workflows/build-cross-compile.yml
vendored
@ -84,7 +84,7 @@ jobs:
|
||||
- target-cpu: riscv64
|
||||
gnu-arch: riscv64
|
||||
debian-arch: riscv64
|
||||
debian-repository: https://httpredir.debian.org/debian/
|
||||
debian-repository: https://snapshot.debian.org/archive/debian/20240228T034848Z/
|
||||
debian-version: sid
|
||||
tolerate-sysroot-errors: true
|
||||
|
||||
|
||||
1
.github/workflows/main.yml
vendored
1
.github/workflows/main.yml
vendored
@ -384,6 +384,7 @@ jobs:
|
||||
- build-windows-aarch64
|
||||
- test-linux-x64
|
||||
- test-macos-x64
|
||||
- test-macos-aarch64
|
||||
- test-windows-x64
|
||||
|
||||
steps:
|
||||
|
||||
3
SECURITY.md
Normal file
3
SECURITY.md
Normal file
@ -0,0 +1,3 @@
|
||||
# JDK Vulnerabilities
|
||||
|
||||
Please follow the process outlined in the [OpenJDK Vulnerability Policy](https://openjdk.org/groups/vulnerability/report) to disclose vulnerabilities in the JDK.
|
||||
@ -614,10 +614,9 @@ be accepted by <code>configure</code>.</p>
|
||||
<code>--with-toolchain-type=clang</code>.</p>
|
||||
<h3 id="apple-xcode">Apple Xcode</h3>
|
||||
<p>The oldest supported version of Xcode is 13.0.</p>
|
||||
<p>You will need the Xcode command line developer tools to be able to
|
||||
build the JDK. (Actually, <em>only</em> the command line tools are
|
||||
needed, not the IDE.) The simplest way to install these is to run:</p>
|
||||
<pre><code>xcode-select --install</code></pre>
|
||||
<p>You will need to download Xcode either from the App Store or specific
|
||||
versions can be easily located via the <a
|
||||
href="https://xcodereleases.com">Xcode Releases</a> website.</p>
|
||||
<p>When updating Xcode, it is advisable to keep an older version for
|
||||
building the JDK. To use a specific version of Xcode you have multiple
|
||||
options:</p>
|
||||
|
||||
@ -422,13 +422,9 @@ To use clang instead of gcc on Linux, use `--with-toolchain-type=clang`.
|
||||
|
||||
The oldest supported version of Xcode is 13.0.
|
||||
|
||||
You will need the Xcode command line developer tools to be able to build the
|
||||
JDK. (Actually, *only* the command line tools are needed, not the IDE.) The
|
||||
simplest way to install these is to run:
|
||||
|
||||
```
|
||||
xcode-select --install
|
||||
```
|
||||
You will need to download Xcode either from the App Store or specific versions
|
||||
can be easily located via the [Xcode Releases](https://xcodereleases.com)
|
||||
website.
|
||||
|
||||
When updating Xcode, it is advisable to keep an older version for building the
|
||||
JDK. To use a specific version of Xcode you have multiple options:
|
||||
|
||||
@ -27,12 +27,13 @@ default: all
|
||||
|
||||
include $(SPEC)
|
||||
include MakeBase.gmk
|
||||
|
||||
include Execute.gmk
|
||||
include Modules.gmk
|
||||
|
||||
################################################################################
|
||||
|
||||
# Use this file inside the image as target for make rule
|
||||
JIMAGE_TARGET_FILE := bin/java$(EXECUTABLE_SUFFIX)
|
||||
INTERIM_JLINK_SUPPORT_DIR := $(SUPPORT_OUTPUTDIR)/interim-image-jlink
|
||||
|
||||
INTERIM_MODULES_LIST := $(call CommaList, $(INTERIM_IMAGE_MODULES))
|
||||
|
||||
@ -42,19 +43,18 @@ JLINK_TOOL := $(JLINK) -J-Djlink.debug=true \
|
||||
--module-path $(INTERIM_JMODS_DIR) \
|
||||
--endian $(OPENJDK_BUILD_CPU_ENDIAN)
|
||||
|
||||
$(INTERIM_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
|
||||
$(call DependOnVariable, INTERIM_MODULES_LIST)
|
||||
$(call LogWarn, Creating interim jimage)
|
||||
$(RM) -r $(INTERIM_IMAGE_DIR)
|
||||
$(call MakeDir, $(INTERIM_IMAGE_DIR))
|
||||
$(call ExecuteWithLog, $(INTERIM_IMAGE_DIR)/jlink, \
|
||||
$(JLINK_TOOL) \
|
||||
--output $(INTERIM_IMAGE_DIR) \
|
||||
--disable-plugin generate-jli-classes \
|
||||
--add-modules $(INTERIM_MODULES_LIST))
|
||||
$(TOUCH) $@
|
||||
$(eval $(call SetupExecute, jlink_interim_image, \
|
||||
WARN := Creating interim jimage, \
|
||||
DEPS := $(JMODS) $(call DependOnVariable, INTERIM_MODULES_LIST), \
|
||||
OUTPUT_DIR := $(INTERIM_IMAGE_DIR), \
|
||||
SUPPORT_DIR := $(INTERIM_JLINK_SUPPORT_DIR), \
|
||||
PRE_COMMAND := $(RM) -r $(INTERIM_IMAGE_DIR), \
|
||||
COMMAND := $(JLINK_TOOL) --output $(INTERIM_IMAGE_DIR) \
|
||||
--disable-plugin generate-jli-classes \
|
||||
--add-modules $(INTERIM_MODULES_LIST), \
|
||||
))
|
||||
|
||||
TARGETS += $(INTERIM_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
|
||||
TARGETS += $(jlink_interim_image)
|
||||
|
||||
################################################################################
|
||||
|
||||
|
||||
@ -568,6 +568,10 @@ $(eval $(call SetupTarget, update-build-docs, \
|
||||
MAKEFILE := UpdateBuildDocs, \
|
||||
))
|
||||
|
||||
$(eval $(call SetupTarget, update-sleef-source, \
|
||||
MAKEFILE := UpdateSleefSource, \
|
||||
))
|
||||
|
||||
$(eval $(call SetupTarget, update-x11wrappers, \
|
||||
MAKEFILE := UpdateX11Wrappers, \
|
||||
DEPS := java.base-copy buildtools-jdk, \
|
||||
|
||||
153
make/UpdateSleefSource.gmk
Normal file
153
make/UpdateSleefSource.gmk
Normal file
@ -0,0 +1,153 @@
|
||||
#
|
||||
# Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
################################################################################
|
||||
|
||||
default: all
|
||||
|
||||
include $(SPEC)
|
||||
include MakeBase.gmk
|
||||
|
||||
include CopyFiles.gmk
|
||||
include Execute.gmk
|
||||
|
||||
################################################################################
|
||||
# This file is responsible for updating the generated sleef source code files
|
||||
# that are checked in to the JDK repo, and that are actually used when building.
|
||||
# This target needs to be re-run every time the source code of libsleef is
|
||||
# updated from upstream.
|
||||
################################################################################
|
||||
|
||||
ifneq ($(COMPILE_TYPE), cross)
|
||||
$(error Only cross-compilation of libsleef is currently supported)
|
||||
endif
|
||||
|
||||
ifeq ($(CMAKE), )
|
||||
$(error CMake not found. Please install cmake and rerun configure)
|
||||
endif
|
||||
|
||||
ifneq ($(OPENJDK_BUILD_OS), linux)
|
||||
$(error This target is only supported on linux)
|
||||
endif
|
||||
|
||||
SLEEF_SUPPORT_DIR := $(MAKESUPPORT_OUTPUTDIR)/sleef
|
||||
SLEEF_SOURCE_BASE_DIR := $(TOPDIR)/src/jdk.incubator.vector/linux/native/libsleef
|
||||
SLEEF_SOURCE_DIR := $(SLEEF_SOURCE_BASE_DIR)/upstream
|
||||
SLEEF_TARGET_DIR := $(SLEEF_SOURCE_BASE_DIR)/generated
|
||||
SLEEF_NATIVE_BUILD_DIR := $(SLEEF_SUPPORT_DIR)/native
|
||||
SLEEF_CROSS_BUILD_DIR := $(SLEEF_SUPPORT_DIR)/cross
|
||||
|
||||
ifeq ($(OPENJDK_TARGET_CPU), aarch64)
|
||||
CROSS_COMPILATION_FILENAMES := sleefinline_advsimd.h sleefinline_sve.h
|
||||
EXTRA_CROSS_OPTIONS := -DSLEEF_ENFORCE_SVE=TRUE
|
||||
else ifeq ($(OPENJDK_TARGET_CPU), riscv64)
|
||||
CROSS_COMPILATION_FILENAMES := sleefinline_rvvm1.h
|
||||
EXTRA_CROSS_OPTIONS := -DSLEEF_ENFORCE_RVVM1=TRUE
|
||||
else
|
||||
$(error Unsupported platform)
|
||||
endif
|
||||
CROSS_COMPILATION_SRC_FILES := $(addprefix $(SLEEF_CROSS_BUILD_DIR)/include/, \
|
||||
$(CROSS_COMPILATION_FILENAMES))
|
||||
|
||||
ifeq ($(TOOLCHAIN_TYPE), clang)
|
||||
SLEEF_TOOLCHAIN_TYPE := llvm
|
||||
else
|
||||
SLEEF_TOOLCHAIN_TYPE := $(TOOLCHAIN_TYPE)
|
||||
endif
|
||||
|
||||
SLEEF_CMAKE_FILE := toolchains/$(OPENJDK_TARGET_CPU)-$(SLEEF_TOOLCHAIN_TYPE).cmake
|
||||
|
||||
# We need to run CMake twice, first using it to configure the build, and then
|
||||
# to actually build; and we need to do this twice, once for a native build
|
||||
# and once for the cross-compilation build.
|
||||
|
||||
$(eval $(call SetupExecute, sleef_native_config, \
|
||||
INFO := Configuring native sleef build, \
|
||||
OUTPUT_DIR := $(SLEEF_NATIVE_BUILD_DIR), \
|
||||
COMMAND := cd $(SLEEF_SOURCE_DIR) && $(CMAKE) -S . -B \
|
||||
$(SLEEF_NATIVE_BUILD_DIR), \
|
||||
))
|
||||
|
||||
TARGETS := $(sleef_native_config)
|
||||
|
||||
$(eval $(call SetupExecute, sleef_native_build, \
|
||||
INFO := Building native sleef, \
|
||||
DEPS := $(sleef_native_config), \
|
||||
OUTPUT_DIR := $(SLEEF_NATIVE_BUILD_DIR), \
|
||||
COMMAND := cd $(SLEEF_SOURCE_DIR) && $(CMAKE) --build \
|
||||
$(SLEEF_NATIVE_BUILD_DIR) -j, \
|
||||
))
|
||||
|
||||
TARGETS := $(sleef_native_build)
|
||||
|
||||
$(eval $(call SetupExecute, sleef_cross_config, \
|
||||
INFO := Configuring cross-compiling sleef build, \
|
||||
DEPS := $(sleef_native_build), \
|
||||
OUTPUT_DIR := $(SLEEF_CROSS_BUILD_DIR), \
|
||||
COMMAND := cd $(SLEEF_SOURCE_DIR) && $(CMAKE) -S . -B \
|
||||
$(SLEEF_CROSS_BUILD_DIR) \
|
||||
-DCMAKE_C_COMPILER=$(CC) \
|
||||
-DCMAKE_TOOLCHAIN_FILE=$(SLEEF_CMAKE_FILE) \
|
||||
-DNATIVE_BUILD_DIR=$(SLEEF_NATIVE_BUILD_DIR) \
|
||||
-DSLEEF_BUILD_INLINE_HEADERS=TRUE \
|
||||
$(EXTRA_CROSS_OPTIONS), \
|
||||
))
|
||||
|
||||
TARGETS := $(sleef_cross_config)
|
||||
|
||||
$(eval $(call SetupExecute, sleef_cross_build, \
|
||||
INFO := Building cross-compiling sleef, \
|
||||
DEPS := $(sleef_cross_config), \
|
||||
OUTPUT_DIR := $(SLEEF_NATIVE_BUILD_DIR), \
|
||||
COMMAND := cd $(SLEEF_SOURCE_DIR) && $(CMAKE) --build \
|
||||
$(SLEEF_CROSS_BUILD_DIR) -j, \
|
||||
))
|
||||
|
||||
TARGETS := $(sleef_cross_build)
|
||||
|
||||
$(CROSS_COMPILATION_SRC_FILES): $(sleef_cross_build)
|
||||
|
||||
# Finally, copy the generated files (and one needed static file) into our
|
||||
# target directory.
|
||||
|
||||
$(eval $(call SetupCopyFiles, copy_static_sleef_source, \
|
||||
FILES := $(SLEEF_SOURCE_DIR)/src/common/misc.h, \
|
||||
DEST := $(SLEEF_TARGET_DIR), \
|
||||
))
|
||||
|
||||
TARGETS := $(copy_static_sleef_source)
|
||||
|
||||
$(eval $(call SetupCopyFiles, copy_generated_sleef_source, \
|
||||
FILES := $(CROSS_COMPILATION_SRC_FILES), \
|
||||
DEST := $(SLEEF_TARGET_DIR), \
|
||||
))
|
||||
|
||||
TARGETS := $(copy_generated_sleef_source)
|
||||
|
||||
################################################################################
|
||||
|
||||
all: $(TARGETS)
|
||||
|
||||
.PHONY: all default
|
||||
@ -99,6 +99,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_TOOLS],
|
||||
UTIL_REQUIRE_SPECIAL(FGREP, [AC_PROG_FGREP])
|
||||
|
||||
# Optional tools, we can do without them
|
||||
UTIL_LOOKUP_PROGS(CMAKE, cmake)
|
||||
UTIL_LOOKUP_PROGS(DF, df)
|
||||
UTIL_LOOKUP_PROGS(GIT, git)
|
||||
UTIL_LOOKUP_PROGS(NICE, nice)
|
||||
|
||||
@ -479,6 +479,22 @@ AC_DEFUN([JVM_FEATURES_CALCULATE_ACTIVE],
|
||||
$JVM_FEATURES_ENABLED, $JVM_FEATURES_DISABLED)
|
||||
])
|
||||
|
||||
################################################################################
|
||||
# Filter the unsupported feature combinations.
|
||||
# This is called after JVM_FEATURES_ACTIVE are fully populated.
|
||||
#
|
||||
AC_DEFUN([JVM_FEATURES_FILTER_UNSUPPORTED],
|
||||
[
|
||||
# G1 late barrier expansion in C2 is not implemented for some platforms.
|
||||
# Choose not to support G1 in this configuration.
|
||||
if JVM_FEATURES_IS_ACTIVE(compiler2); then
|
||||
if test "x$OPENJDK_TARGET_CPU" = "xx86"; then
|
||||
AC_MSG_NOTICE([G1 cannot be used with C2 on this platform, disabling G1])
|
||||
UTIL_GET_NON_MATCHING_VALUES(JVM_FEATURES_ACTIVE, $JVM_FEATURES_ACTIVE, "g1gc")
|
||||
fi
|
||||
fi
|
||||
])
|
||||
|
||||
################################################################################
|
||||
# Helper function for JVM_FEATURES_VERIFY. Check if the specified JVM
|
||||
# feature is active. To be used in shell if constructs, like this:
|
||||
@ -554,6 +570,9 @@ AC_DEFUN_ONCE([JVM_FEATURES_SETUP],
|
||||
# The result is stored in JVM_FEATURES_ACTIVE.
|
||||
JVM_FEATURES_CALCULATE_ACTIVE($variant)
|
||||
|
||||
# Filter unsupported feature combinations from JVM_FEATURES_ACTIVE.
|
||||
JVM_FEATURES_FILTER_UNSUPPORTED
|
||||
|
||||
# Verify consistency for JVM_FEATURES_ACTIVE.
|
||||
JVM_FEATURES_VERIFY($variant)
|
||||
|
||||
|
||||
@ -719,6 +719,7 @@ CCACHE := @CCACHE@
|
||||
# CD is going away, but remains to cater for legacy makefiles.
|
||||
CD := cd
|
||||
CHMOD := @CHMOD@
|
||||
CMAKE := @CMAKE@
|
||||
CODESIGN := @CODESIGN@
|
||||
CP := @CP@
|
||||
CUT := @CUT@
|
||||
|
||||
@ -358,6 +358,11 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_COMPILER_VERSION],
|
||||
# Copyright (C) 2013 Free Software Foundation, Inc.
|
||||
# This is free software; see the source for copying conditions. There is NO
|
||||
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# or look like
|
||||
# gcc (GCC) 10.2.1 20200825 (Alibaba 10.2.1-3.8 2.32)
|
||||
# Copyright (C) 2020 Free Software Foundation, Inc.
|
||||
# This is free software; see the source for copying conditions. There is NO
|
||||
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
COMPILER_VERSION_OUTPUT=`$COMPILER --version 2>&1`
|
||||
# Check that this is likely to be GCC.
|
||||
$ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Free Software Foundation" > /dev/null
|
||||
@ -371,7 +376,8 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_COMPILER_VERSION],
|
||||
COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
|
||||
$SED -e 's/ *Copyright .*//'`
|
||||
COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \
|
||||
$SED -e 's/^.* \(@<:@1-9@:>@<:@0-9@:>@*\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
|
||||
$AWK -F ')' '{print [$]2}' | \
|
||||
$AWK '{print [$]1}'`
|
||||
elif test "x$TOOLCHAIN_TYPE" = xclang; then
|
||||
# clang --version output typically looks like
|
||||
# Apple clang version 15.0.0 (clang-1500.3.9.4)
|
||||
|
||||
@ -74,7 +74,7 @@ define SetupBuildLauncherBody
|
||||
endif
|
||||
|
||||
ifneq ($$($1_MAIN_CLASS), )
|
||||
$1_JAVA_ARGS += -ms8m
|
||||
$1_JAVA_ARGS += -Xms8m
|
||||
$1_LAUNCHER_CLASS := -m $$($1_MAIN_MODULE)/$$($1_MAIN_CLASS)
|
||||
endif
|
||||
|
||||
|
||||
@ -29,21 +29,21 @@ GTEST_VERSION=1.14.0
|
||||
JTREG_VERSION=7.4+1
|
||||
|
||||
LINUX_X64_BOOT_JDK_EXT=tar.gz
|
||||
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz
|
||||
LINUX_X64_BOOT_JDK_SHA256=41536f115668308ecf4eba92aaf6acaeb0936225828b741efd83b6173ba82963
|
||||
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk23/3c5b90190c68498b986a97f276efd28a/37/GPL/openjdk-23_linux-x64_bin.tar.gz
|
||||
LINUX_X64_BOOT_JDK_SHA256=08fea92724127c6fa0f2e5ea0b07ff4951ccb1e2f22db3c21eebbd7347152a67
|
||||
|
||||
ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz
|
||||
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin22-binaries/releases/download/jdk-22.0.2%2B9/OpenJDK22U-jdk_x64_alpine-linux_hotspot_22.0.2_9.tar.gz
|
||||
ALPINE_LINUX_X64_BOOT_JDK_SHA256=49f73414824b1a7c268a611225fa4d7ce5e25600201e0f1cd59f94d1040b5264
|
||||
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin23-binaries/releases/download/jdk-23%2B37/OpenJDK23U-jdk_x64_alpine-linux_hotspot_23_37.tar.gz
|
||||
ALPINE_LINUX_X64_BOOT_JDK_SHA256=bff4c78f30d8d173e622bf2f40c36113df47337fc6d1ee5105ed2459841165aa
|
||||
|
||||
MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
|
||||
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_macos-aarch64_bin.tar.gz
|
||||
MACOS_AARCH64_BOOT_JDK_SHA256=3dab98730234e1a87aec14bcb8171d2cae101e96ff4eed1dab96abbb08e843fd
|
||||
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk23/3c5b90190c68498b986a97f276efd28a/37/GPL/openjdk-23_macos-aarch64_bin.tar.gz
|
||||
MACOS_AARCH64_BOOT_JDK_SHA256=9527bf080a74ae6dca51df413aa826f0c011c6048885e4c8ad112172be8815f3
|
||||
|
||||
MACOS_X64_BOOT_JDK_EXT=tar.gz
|
||||
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_macos-x64_bin.tar.gz
|
||||
MACOS_X64_BOOT_JDK_SHA256=e8b3ec7a7077711223d31156e771f11723cd7af31c2017f1bd2eda20855940fb
|
||||
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk23/3c5b90190c68498b986a97f276efd28a/37/GPL/openjdk-23_macos-x64_bin.tar.gz
|
||||
MACOS_X64_BOOT_JDK_SHA256=5c3a909fd2079d0e376dd43c85c4f7d02d08914866f196480bd47784b2a0121e
|
||||
|
||||
WINDOWS_X64_BOOT_JDK_EXT=zip
|
||||
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_windows-x64_bin.zip
|
||||
WINDOWS_X64_BOOT_JDK_SHA256=f2a9b9ab944e71a64637fcdc6b13a1188cf02d4eb9ecf71dc927e98b3e45f5dc
|
||||
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk23/3c5b90190c68498b986a97f276efd28a/37/GPL/openjdk-23_windows-x64_bin.zip
|
||||
WINDOWS_X64_BOOT_JDK_SHA256=cba5013874ba50cae543c86fe6423453816c77281e2751a8a9a633d966f1dc04
|
||||
|
||||
@ -390,8 +390,8 @@ var getJibProfilesCommon = function (input, data) {
|
||||
};
|
||||
};
|
||||
|
||||
common.boot_jdk_version = "22";
|
||||
common.boot_jdk_build_number = "36";
|
||||
common.boot_jdk_version = "23";
|
||||
common.boot_jdk_build_number = "37";
|
||||
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
|
||||
+ common.boot_jdk_version
|
||||
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");
|
||||
|
||||
@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2025-03-18
|
||||
DEFAULT_VERSION_CLASSFILE_MAJOR=68 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
|
||||
DEFAULT_VERSION_CLASSFILE_MINOR=0
|
||||
DEFAULT_VERSION_DOCS_API_SINCE=11
|
||||
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="22 23 24"
|
||||
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="23 24"
|
||||
DEFAULT_JDK_SOURCE_TARGET_VERSION=24
|
||||
DEFAULT_PROMOTED_VERSION_PRE=ea
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,50 +25,70 @@
|
||||
#
|
||||
|
||||
# Create a bundle in the current directory, containing what's needed to run
|
||||
# the 'autoconf' program by the OpenJDK build.
|
||||
# the 'autoconf' program by the OpenJDK build. To override TARGET_PLATFORM
|
||||
# just set the variable before running this script.
|
||||
|
||||
# Autoconf depends on m4, so download and build that first.
|
||||
AUTOCONF_VERSION=2.69
|
||||
M4_VERSION=1.4.18
|
||||
|
||||
PACKAGE_VERSION=1.0.1
|
||||
TARGET_PLATFORM=linux_x86
|
||||
case `uname -s` in
|
||||
Darwin)
|
||||
os=macosx
|
||||
;;
|
||||
Linux)
|
||||
os=linux
|
||||
;;
|
||||
CYGWIN*)
|
||||
os=cygwin
|
||||
;;
|
||||
esac
|
||||
case `uname -m` in
|
||||
arm64|aarch64)
|
||||
arch=aarch64
|
||||
;;
|
||||
amd64|x86_64|x64)
|
||||
arch=x64
|
||||
;;
|
||||
esac
|
||||
TARGET_PLATFORM=${TARGET_PLATFORM:="${os}_${arch}"}
|
||||
|
||||
MODULE_NAME=autoconf-$TARGET_PLATFORM-$AUTOCONF_VERSION+$PACKAGE_VERSION
|
||||
BUNDLE_NAME=$MODULE_NAME.tar.gz
|
||||
|
||||
TMPDIR=`mktemp -d -t autoconfbundle-XXXX`
|
||||
trap "rm -rf \"$TMPDIR\"" EXIT
|
||||
SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)"
|
||||
OUTPUT_ROOT="${SCRIPT_DIR}/../../build/autoconf"
|
||||
|
||||
ORIG_DIR=`pwd`
|
||||
cd $TMPDIR
|
||||
OUTPUT_DIR=$TMPDIR/$MODULE_NAME
|
||||
mkdir -p $OUTPUT_DIR/usr
|
||||
cd $OUTPUT_ROOT
|
||||
IMAGE_DIR=$OUTPUT_ROOT/$MODULE_NAME
|
||||
mkdir -p $IMAGE_DIR/usr
|
||||
|
||||
# Download and build m4
|
||||
|
||||
if test "x$TARGET_PLATFORM" = xcygwin_x64; then
|
||||
# On cygwin 64-bit, just copy the cygwin .exe file
|
||||
mkdir -p $OUTPUT_DIR/usr/bin
|
||||
cp /usr/bin/m4 $OUTPUT_DIR/usr/bin
|
||||
mkdir -p $IMAGE_DIR/usr/bin
|
||||
cp /usr/bin/m4 $IMAGE_DIR/usr/bin
|
||||
elif test "x$TARGET_PLATFORM" = xcygwin_x86; then
|
||||
# On cygwin 32-bit, just copy the cygwin .exe file
|
||||
mkdir -p $OUTPUT_DIR/usr/bin
|
||||
cp /usr/bin/m4 $OUTPUT_DIR/usr/bin
|
||||
mkdir -p $IMAGE_DIR/usr/bin
|
||||
cp /usr/bin/m4 $IMAGE_DIR/usr/bin
|
||||
elif test "x$TARGET_PLATFORM" = xlinux_x64; then
|
||||
M4_VERSION=1.4.13-5
|
||||
wget http://yum.oracle.com/repo/OracleLinux/OL6/latest/x86_64/getPackage/m4-$M4_VERSION.el6.x86_64.rpm
|
||||
cd $OUTPUT_DIR
|
||||
rpm2cpio ../m4-$M4_VERSION.el6.x86_64.rpm | cpio -d -i
|
||||
cd $IMAGE_DIR
|
||||
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.x86_64.rpm | cpio -d -i
|
||||
elif test "x$TARGET_PLATFORM" = xlinux_x86; then
|
||||
M4_VERSION=1.4.13-5
|
||||
wget http://yum.oracle.com/repo/OracleLinux/OL6/latest/i386/getPackage/m4-$M4_VERSION.el6.i686.rpm
|
||||
cd $OUTPUT_DIR
|
||||
rpm2cpio ../m4-$M4_VERSION.el6.i686.rpm | cpio -d -i
|
||||
cd $IMAGE_DIR
|
||||
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.i686.rpm | cpio -d -i
|
||||
else
|
||||
wget https://ftp.gnu.org/gnu/m4/m4-$M4_VERSION.tar.gz
|
||||
tar xzf m4-$M4_VERSION.tar.gz
|
||||
cd m4-$M4_VERSION
|
||||
./configure --prefix=$OUTPUT_DIR/usr
|
||||
./configure --prefix=$IMAGE_DIR/usr CFLAGS="-w -Wno-everything"
|
||||
make
|
||||
make install
|
||||
cd ..
|
||||
@ -79,15 +99,14 @@ fi
|
||||
wget https://ftp.gnu.org/gnu/autoconf/autoconf-$AUTOCONF_VERSION.tar.gz
|
||||
tar xzf autoconf-$AUTOCONF_VERSION.tar.gz
|
||||
cd autoconf-$AUTOCONF_VERSION
|
||||
./configure --prefix=$OUTPUT_DIR/usr M4=$OUTPUT_DIR/usr/bin/m4
|
||||
./configure --prefix=$IMAGE_DIR/usr M4=$IMAGE_DIR/usr/bin/m4
|
||||
make
|
||||
make install
|
||||
cd ..
|
||||
|
||||
perl -pi -e "s!$OUTPUT_DIR/!./!" $OUTPUT_DIR/usr/bin/auto* $OUTPUT_DIR/usr/share/autoconf/autom4te.cfg
|
||||
cp $OUTPUT_DIR/usr/share/autoconf/autom4te.cfg $OUTPUT_DIR/autom4te.cfg
|
||||
perl -pi -e "s!$IMAGE_DIR/!./!" $IMAGE_DIR/usr/bin/auto* $IMAGE_DIR/usr/share/autoconf/autom4te.cfg
|
||||
|
||||
cat > $OUTPUT_DIR/autoconf << EOF
|
||||
cat > $IMAGE_DIR/autoconf << EOF
|
||||
#!/bin/bash
|
||||
# Get an absolute path to this script
|
||||
this_script_dir=\`dirname \$0\`
|
||||
@ -100,17 +119,10 @@ export AUTOHEADER="\$this_script_dir/usr/bin/autoheader"
|
||||
export AC_MACRODIR="\$this_script_dir/usr/share/autoconf"
|
||||
export autom4te_perllibdir="\$this_script_dir/usr/share/autoconf"
|
||||
|
||||
autom4te_cfg=\$this_script_dir/usr/share/autoconf/autom4te.cfg
|
||||
cp \$this_script_dir/autom4te.cfg \$autom4te_cfg
|
||||
PREPEND_INCLUDE="--prepend-include \$this_script_dir/usr/share/autoconf"
|
||||
|
||||
echo 'begin-language: "M4sugar"' >> \$autom4te_cfg
|
||||
echo "args: --prepend-include '"\$this_script_dir/usr/share/autoconf"'" >> \$autom4te_cfg
|
||||
echo 'end-language: "M4sugar"' >> \$autom4te_cfg
|
||||
|
||||
exec \$this_script_dir/usr/bin/autoconf "\$@"
|
||||
exec \$this_script_dir/usr/bin/autoconf \$PREPEND_INCLUDE "\$@"
|
||||
EOF
|
||||
chmod +x $OUTPUT_DIR/autoconf
|
||||
cd $OUTPUT_DIR
|
||||
tar -cvzf ../$BUNDLE_NAME *
|
||||
cd ..
|
||||
cp $BUNDLE_NAME "$ORIG_DIR"
|
||||
chmod +x $IMAGE_DIR/autoconf
|
||||
cd $IMAGE_DIR
|
||||
tar -cvzf $OUTPUT_ROOT/$BUNDLE_NAME *
|
||||
|
||||
@ -200,6 +200,13 @@ ifeq ($(call check-jvm-feature, compiler2), true)
|
||||
)))
|
||||
endif
|
||||
|
||||
ifeq ($(call check-jvm-feature, g1gc), true)
|
||||
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
|
||||
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/g1/g1_$(HOTSPOT_TARGET_CPU).ad \
|
||||
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/g1/g1_$(HOTSPOT_TARGET_CPU_ARCH).ad \
|
||||
)))
|
||||
endif
|
||||
|
||||
SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
|
||||
|
||||
INSERT_FILENAME_AWK_SCRIPT := \
|
||||
|
||||
@ -786,7 +786,10 @@ public class CLDRConverter {
|
||||
String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
|
||||
.orElse(tzid);
|
||||
// Follow link, if needed
|
||||
var tzLink = tzdbLinks.get(tzKey);
|
||||
String tzLink = null;
|
||||
for (var k = tzKey; tzdbLinks.containsKey(k);) {
|
||||
k = tzLink = tzdbLinks.get(k);
|
||||
}
|
||||
if (tzLink == null && tzdbLinks.containsValue(tzKey)) {
|
||||
// reverse link search
|
||||
// this is needed as in tzdb, "America/Buenos_Aires" links to
|
||||
@ -1214,7 +1217,7 @@ public class CLDRConverter {
|
||||
private static Set<String> getAvailableZoneIds() {
|
||||
assert handlerMetaZones != null;
|
||||
if (AVAILABLE_TZIDS == null) {
|
||||
AVAILABLE_TZIDS = new HashSet<>(ZoneId.getAvailableZoneIds());
|
||||
AVAILABLE_TZIDS = new HashSet<>(Arrays.asList(TimeZone.getAvailableIDs()));
|
||||
AVAILABLE_TZIDS.addAll(handlerMetaZones.keySet());
|
||||
AVAILABLE_TZIDS.remove(MetaZonesParseHandler.NO_METAZONE_KEY);
|
||||
}
|
||||
@ -1372,6 +1375,7 @@ public class CLDRConverter {
|
||||
private static void generateTZDBShortNamesMap() throws IOException {
|
||||
Files.walk(Path.of(tzDataDir), 1, FileVisitOption.FOLLOW_LINKS)
|
||||
.filter(p -> p.toFile().isFile())
|
||||
.filter(p -> p.getFileName().toString().matches("africa|antarctica|asia|australasia|backward|etcetera|europe|northamerica|southamerica"))
|
||||
.forEach(p -> {
|
||||
try {
|
||||
String zone = null;
|
||||
@ -1394,43 +1398,41 @@ public class CLDRConverter {
|
||||
}
|
||||
// remove comments in-line
|
||||
line = line.replaceAll("[ \t]*#.*", "");
|
||||
|
||||
var tokens = line.split("[ \t]+", -1);
|
||||
var token0len = tokens.length > 0 ? tokens[0].length() : 0;
|
||||
// Zone line
|
||||
if (line.startsWith("Zone")) {
|
||||
if (token0len > 0 && tokens[0].regionMatches(true, 0, "Zone", 0, token0len)) {
|
||||
if (zone != null) {
|
||||
tzdbShortNamesMap.put(zone, format + NBSP + rule);
|
||||
}
|
||||
var zl = line.split("[ \t]+", -1);
|
||||
zone = zl[1];
|
||||
rule = zl[3];
|
||||
format = flipIfNeeded(inVanguard, zl[4]);
|
||||
zone = tokens[1];
|
||||
rule = tokens[3];
|
||||
format = flipIfNeeded(inVanguard, tokens[4]);
|
||||
} else {
|
||||
if (zone != null) {
|
||||
if (line.startsWith("Rule") ||
|
||||
line.startsWith("Link")) {
|
||||
if (token0len > 0 &&
|
||||
(tokens[0].regionMatches(true, 0, "Rule", 0, token0len) ||
|
||||
tokens[0].regionMatches(true, 0, "Link", 0, token0len))) {
|
||||
tzdbShortNamesMap.put(zone, format + NBSP + rule);
|
||||
zone = null;
|
||||
rule = null;
|
||||
format = null;
|
||||
} else {
|
||||
var s = line.split("[ \t]+", -1);
|
||||
rule = s[2];
|
||||
format = flipIfNeeded(inVanguard, s[3]);
|
||||
rule = tokens[2];
|
||||
format = flipIfNeeded(inVanguard, tokens[3]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Rule line
|
||||
if (line.startsWith("Rule")) {
|
||||
var rl = line.split("[ \t]+", -1);
|
||||
tzdbSubstLetters.put(rl[1] + NBSP + (rl[8].equals("0") ? STD : DST),
|
||||
rl[9].replace(NO_SUBST, ""));
|
||||
if (token0len > 0 && tokens[0].regionMatches(true, 0, "Rule", 0, token0len)) {
|
||||
tzdbSubstLetters.put(tokens[1] + NBSP + (tokens[8].equals("0") ? STD : DST),
|
||||
tokens[9].replace(NO_SUBST, ""));
|
||||
}
|
||||
|
||||
// Link line
|
||||
if (line.startsWith("Link")) {
|
||||
var ll = line.split("[ \t]+", -1);
|
||||
tzdbLinks.put(ll[2], ll[1]);
|
||||
if (token0len > 0 && tokens[0].regionMatches(true, 0, "Link", 0, token0len)) {
|
||||
tzdbLinks.put(tokens[2], tokens[1]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1491,13 +1493,14 @@ public class CLDRConverter {
|
||||
/*
|
||||
* Convert TZDB offsets to JDK's offsets, eg, "-08" to "GMT-08:00".
|
||||
* If it cannot recognize the pattern, return the argument as is.
|
||||
* Returning null results in generating the GMT format at runtime.
|
||||
*/
|
||||
private static String convertGMTName(String f) {
|
||||
try {
|
||||
// Should pre-fill GMT format once COMPAT is gone.
|
||||
// Till then, fall back to GMT format at runtime, after COMPAT short
|
||||
// names are populated
|
||||
ZoneOffset.of(f);
|
||||
if (!f.equals("%z")) {
|
||||
// Validate if the format is an offset
|
||||
ZoneOffset.of(f);
|
||||
}
|
||||
return null;
|
||||
} catch (DateTimeException dte) {
|
||||
// textual representation. return as is
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -273,7 +273,7 @@ public final class TzdbZoneRulesCompiler {
|
||||
// link version-region-rules
|
||||
out.writeShort(builtZones.size());
|
||||
for (Map.Entry<String, ZoneRules> entry : builtZones.entrySet()) {
|
||||
int regionIndex = Arrays.binarySearch(regionArray, entry.getKey());
|
||||
int regionIndex = findRegionIndex(regionArray, entry.getKey());
|
||||
int rulesIndex = rulesList.indexOf(entry.getValue());
|
||||
out.writeShort(regionIndex);
|
||||
out.writeShort(rulesIndex);
|
||||
@ -281,8 +281,8 @@ public final class TzdbZoneRulesCompiler {
|
||||
// alias-region
|
||||
out.writeShort(links.size());
|
||||
for (Map.Entry<String, String> entry : links.entrySet()) {
|
||||
int aliasIndex = Arrays.binarySearch(regionArray, entry.getKey());
|
||||
int regionIndex = Arrays.binarySearch(regionArray, entry.getValue());
|
||||
int aliasIndex = findRegionIndex(regionArray, entry.getKey());
|
||||
int regionIndex = findRegionIndex(regionArray, entry.getValue());
|
||||
out.writeShort(aliasIndex);
|
||||
out.writeShort(regionIndex);
|
||||
}
|
||||
@ -294,6 +294,14 @@ public final class TzdbZoneRulesCompiler {
|
||||
}
|
||||
}
|
||||
|
||||
private static int findRegionIndex(String[] regionArray, String region) {
|
||||
int index = Arrays.binarySearch(regionArray, region);
|
||||
if (index < 0) {
|
||||
throw new IllegalArgumentException("Unknown region: " + region);
|
||||
}
|
||||
return index;
|
||||
}
|
||||
|
||||
/** Whether to output verbose messages. */
|
||||
private boolean verbose;
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -164,7 +164,8 @@ class TzdbZoneRulesProvider {
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (line.startsWith("Zone")) { // parse Zone line
|
||||
int token0len = tokens.length > 0 ? tokens[0].length() : line.length();
|
||||
if (line.regionMatches(true, 0, "Zone", 0, token0len)) { // parse Zone line
|
||||
String name = tokens[1];
|
||||
if (excludedZones.contains(name)){
|
||||
continue;
|
||||
@ -182,13 +183,13 @@ class TzdbZoneRulesProvider {
|
||||
if (zLine.parse(tokens, 2)) {
|
||||
openZone = null;
|
||||
}
|
||||
} else if (line.startsWith("Rule")) { // parse Rule line
|
||||
} else if (line.regionMatches(true, 0, "Rule", 0, token0len)) { // parse Rule line
|
||||
String name = tokens[1];
|
||||
if (!rules.containsKey(name)) {
|
||||
rules.put(name, new ArrayList<RuleLine>(10));
|
||||
}
|
||||
rules.get(name).add(new RuleLine().parse(tokens));
|
||||
} else if (line.startsWith("Link")) { // parse link line
|
||||
} else if (line.regionMatches(true, 0, "Link", 0, token0len)) { // parse link line
|
||||
if (tokens.length >= 3) {
|
||||
String realId = tokens[1];
|
||||
String aliasId = tokens[2];
|
||||
@ -304,7 +305,7 @@ class TzdbZoneRulesProvider {
|
||||
month = parseMonth(tokens[off++]);
|
||||
if (off < tokens.length) {
|
||||
String dayRule = tokens[off++];
|
||||
if (dayRule.startsWith("last")) {
|
||||
if (dayRule.regionMatches(true, 0, "last", 0, 4)) {
|
||||
dayOfMonth = -1;
|
||||
dayOfWeek = parseDayOfWeek(dayRule.substring(4));
|
||||
adjustForwards = false;
|
||||
@ -355,42 +356,45 @@ class TzdbZoneRulesProvider {
|
||||
}
|
||||
|
||||
int parseYear(String year, int defaultYear) {
|
||||
switch (year.toLowerCase()) {
|
||||
case "min": return 1900;
|
||||
case "max": return Year.MAX_VALUE;
|
||||
case "only": return defaultYear;
|
||||
}
|
||||
int len = year.length();
|
||||
|
||||
if (year.regionMatches(true, 0, "minimum", 0, len)) return 1900;
|
||||
if (year.regionMatches(true, 0, "maximum", 0, len)) return Year.MAX_VALUE;
|
||||
if (year.regionMatches(true, 0, "only", 0, len)) return defaultYear;
|
||||
|
||||
return Integer.parseInt(year);
|
||||
}
|
||||
|
||||
Month parseMonth(String mon) {
|
||||
switch (mon) {
|
||||
case "Jan": return Month.JANUARY;
|
||||
case "Feb": return Month.FEBRUARY;
|
||||
case "Mar": return Month.MARCH;
|
||||
case "Apr": return Month.APRIL;
|
||||
case "May": return Month.MAY;
|
||||
case "Jun": return Month.JUNE;
|
||||
case "Jul": return Month.JULY;
|
||||
case "Aug": return Month.AUGUST;
|
||||
case "Sep": return Month.SEPTEMBER;
|
||||
case "Oct": return Month.OCTOBER;
|
||||
case "Nov": return Month.NOVEMBER;
|
||||
case "Dec": return Month.DECEMBER;
|
||||
}
|
||||
int len = mon.length();
|
||||
|
||||
if (mon.regionMatches(true, 0, "January", 0, len)) return Month.JANUARY;
|
||||
if (mon.regionMatches(true, 0, "February", 0, len)) return Month.FEBRUARY;
|
||||
if (mon.regionMatches(true, 0, "March", 0, len)) return Month.MARCH;
|
||||
if (mon.regionMatches(true, 0, "April", 0, len)) return Month.APRIL;
|
||||
if (mon.regionMatches(true, 0, "May", 0, len)) return Month.MAY;
|
||||
if (mon.regionMatches(true, 0, "June", 0, len)) return Month.JUNE;
|
||||
if (mon.regionMatches(true, 0, "July", 0, len)) return Month.JULY;
|
||||
if (mon.regionMatches(true, 0, "August", 0, len)) return Month.AUGUST;
|
||||
if (mon.regionMatches(true, 0, "September", 0, len)) return Month.SEPTEMBER;
|
||||
if (mon.regionMatches(true, 0, "October", 0, len)) return Month.OCTOBER;
|
||||
if (mon.regionMatches(true, 0, "November", 0, len)) return Month.NOVEMBER;
|
||||
if (mon.regionMatches(true, 0, "December", 0, len)) return Month.DECEMBER;
|
||||
|
||||
throw new IllegalArgumentException("Unknown month: " + mon);
|
||||
}
|
||||
|
||||
DayOfWeek parseDayOfWeek(String dow) {
|
||||
switch (dow) {
|
||||
case "Mon": return DayOfWeek.MONDAY;
|
||||
case "Tue": return DayOfWeek.TUESDAY;
|
||||
case "Wed": return DayOfWeek.WEDNESDAY;
|
||||
case "Thu": return DayOfWeek.THURSDAY;
|
||||
case "Fri": return DayOfWeek.FRIDAY;
|
||||
case "Sat": return DayOfWeek.SATURDAY;
|
||||
case "Sun": return DayOfWeek.SUNDAY;
|
||||
}
|
||||
int len = dow.length();
|
||||
|
||||
if (dow.regionMatches(true, 0, "Monday", 0, len)) return DayOfWeek.MONDAY;
|
||||
if (dow.regionMatches(true, 0, "Tuesday", 0, len)) return DayOfWeek.TUESDAY;
|
||||
if (dow.regionMatches(true, 0, "Wednesday", 0, len)) return DayOfWeek.WEDNESDAY;
|
||||
if (dow.regionMatches(true, 0, "Thursday", 0, len)) return DayOfWeek.THURSDAY;
|
||||
if (dow.regionMatches(true, 0, "Friday", 0, len)) return DayOfWeek.FRIDAY;
|
||||
if (dow.regionMatches(true, 0, "Saturday", 0, len)) return DayOfWeek.SATURDAY;
|
||||
if (dow.regionMatches(true, 0, "Sunday", 0, len)) return DayOfWeek.SUNDAY;
|
||||
|
||||
throw new IllegalArgumentException("Unknown day-of-week: " + dow);
|
||||
}
|
||||
|
||||
|
||||
@ -59,9 +59,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBSAPROC, \
|
||||
OPTIMIZATION := HIGH, \
|
||||
EXTRA_HEADER_DIRS := java.base:libjvm, \
|
||||
DISABLED_WARNINGS_gcc := sign-compare, \
|
||||
DISABLED_WARNINGS_gcc_LinuxDebuggerLocal.cpp := unused-variable, \
|
||||
DISABLED_WARNINGS_gcc_ps_core.c := pointer-arith, \
|
||||
DISABLED_WARNINGS_gcc_symtab.c := unused-but-set-variable, \
|
||||
DISABLED_WARNINGS_clang := sign-compare, \
|
||||
DISABLED_WARNINGS_clang_libproc_impl.c := format-nonliteral, \
|
||||
DISABLED_WARNINGS_clang_MacosxDebuggerLocal.m := unused-variable, \
|
||||
|
||||
@ -37,3 +37,21 @@ ifeq ($(call isTargetOs, linux windows)+$(call isTargetCpu, x86_64)+$(INCLUDE_CO
|
||||
|
||||
TARGETS += $(BUILD_LIBJSVML)
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
## Build libsleef
|
||||
################################################################################
|
||||
|
||||
ifeq ($(call isTargetOs, linux)+$(call isTargetCpu, riscv64)+$(INCLUDE_COMPILER2), true+true+true)
|
||||
$(eval $(call SetupJdkLibrary, BUILD_LIBSLEEF, \
|
||||
NAME := sleef, \
|
||||
OPTIMIZATION := HIGH, \
|
||||
SRC := libsleef/lib, \
|
||||
EXTRA_SRC := libsleef/generated, \
|
||||
DISABLED_WARNINGS_gcc := unused-function sign-compare tautological-compare ignored-qualifiers, \
|
||||
DISABLED_WARNINGS_clang := unused-function sign-compare tautological-compare ignored-qualifiers, \
|
||||
CFLAGS := -march=rv64gcv, \
|
||||
))
|
||||
|
||||
TARGETS += $(BUILD_LIBSLEEF)
|
||||
endif
|
||||
|
||||
@ -885,7 +885,7 @@ BUILD_HOTSPOT_JTREG_EXECUTABLES_JDK_LIBS_exedaemonDestroy := java.base:libjvm
|
||||
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
|
||||
BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c libTestJNI.c libCompleteExit.c libMonitorWithDeadObjectTest.c libTestPsig.c exeGetCreatedJavaVMs.c
|
||||
BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c libTestJNI.c libCompleteExit.c libMonitorWithDeadObjectTest.c libTestPsig.c exeGetCreatedJavaVMs.c libTestUnloadedClass.cpp
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_JDK_LIBS_libnativeStack := java.base:libjvm
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_JDK_LIBS_libVThreadEventTest := java.base:libjvm
|
||||
else
|
||||
@ -1526,6 +1526,7 @@ else
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libCompleteExit += -lpthread
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMonitorWithDeadObjectTest += -lpthread
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libnativeStack += -lpthread
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libTestUnloadedClass += -lpthread
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_JDK_LIBS_libVThreadEventTest := java.base:libjvm
|
||||
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeGetCreatedJavaVMs := -lpthread
|
||||
BUILD_HOTSPOT_JTREG_EXECUTABLES_JDK_LIBS_exeGetCreatedJavaVMs := java.base:libjvm
|
||||
|
||||
@ -115,6 +115,8 @@ ifeq ($(call isTargetOs, linux), true)
|
||||
# stripping during the test libraries' build.
|
||||
BUILD_JDK_JTREG_LIBRARIES_CFLAGS_libFib := -g
|
||||
BUILD_JDK_JTREG_LIBRARIES_STRIP_SYMBOLS_libFib := false
|
||||
# nio tests' libCreationTimeHelper native needs -ldl linker flag
|
||||
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libCreationTimeHelper := -ldl
|
||||
endif
|
||||
|
||||
ifeq ($(ASAN_ENABLED), true)
|
||||
|
||||
@ -1244,7 +1244,7 @@ source %{
|
||||
|
||||
// r27 is not allocatable when compressed oops is on and heapbase is not
|
||||
// zero, compressed klass pointers doesn't use r27 after JDK-8234794
|
||||
if (UseCompressedOops && (CompressedOops::ptrs_base() != nullptr)) {
|
||||
if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
|
||||
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
|
||||
_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
|
||||
_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
|
||||
@ -2307,10 +2307,6 @@ const RegMask* Matcher::predicate_reg_mask(void) {
|
||||
return &_PR_REG_mask;
|
||||
}
|
||||
|
||||
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
|
||||
return new TypeVectMask(elemTy, length);
|
||||
}
|
||||
|
||||
// Vector calling convention not yet implemented.
|
||||
bool Matcher::supports_vector_calling_convention(void) {
|
||||
return false;
|
||||
@ -2620,7 +2616,8 @@ static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
|
||||
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
|
||||
if (is_vshift_con_pattern(n, m) ||
|
||||
is_vector_bitwise_not_pattern(n, m) ||
|
||||
is_valid_sve_arith_imm_pattern(n, m)) {
|
||||
is_valid_sve_arith_imm_pattern(n, m) ||
|
||||
is_encode_and_store_pattern(n, m)) {
|
||||
mstack.push(m, Visit);
|
||||
return true;
|
||||
}
|
||||
@ -2720,7 +2717,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
|
||||
{
|
||||
Address addr = mem2address(opcode, base, index, scale, disp);
|
||||
if (addr.getMode() == Address::base_plus_offset) {
|
||||
// Fix up any out-of-range offsets.
|
||||
/* Fix up any out-of-range offsets. */
|
||||
assert_different_registers(rscratch1, base);
|
||||
assert_different_registers(rscratch1, reg);
|
||||
addr = __ legitimize_address(addr, size_in_memory, rscratch1);
|
||||
@ -2761,11 +2758,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
|
||||
int opcode, Register base, int index, int size, int disp)
|
||||
{
|
||||
if (index == -1) {
|
||||
// Fix up any out-of-range offsets.
|
||||
assert_different_registers(rscratch1, base);
|
||||
Address addr = Address(base, disp);
|
||||
addr = __ legitimize_address(addr, (1 << T), rscratch1);
|
||||
(masm->*insn)(reg, T, addr);
|
||||
(masm->*insn)(reg, T, Address(base, disp));
|
||||
} else {
|
||||
assert(disp == 0, "unsupported address mode");
|
||||
(masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
|
||||
@ -2820,7 +2813,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -2828,7 +2821,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -2836,7 +2829,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -2844,7 +2837,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -2852,7 +2845,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
@ -2860,7 +2853,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
@ -2868,7 +2861,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
@ -2876,7 +2869,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
@ -2884,7 +2877,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -2892,7 +2885,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -2900,7 +2893,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -2908,7 +2901,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
|
||||
@ -2916,7 +2909,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
|
||||
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -2924,7 +2917,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
|
||||
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
|
||||
loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
|
||||
@ -2932,7 +2925,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strb(iRegI src, memory mem) %{
|
||||
enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
|
||||
Register src_reg = as_Register($src$$reg);
|
||||
loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -2940,14 +2933,14 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strb0(memory mem) %{
|
||||
enc_class aarch64_enc_strb0(memory1 mem) %{
|
||||
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
%}
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strh(iRegI src, memory mem) %{
|
||||
enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
|
||||
Register src_reg = as_Register($src$$reg);
|
||||
loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
@ -2955,14 +2948,14 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strh0(memory mem) %{
|
||||
enc_class aarch64_enc_strh0(memory2 mem) %{
|
||||
loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
|
||||
%}
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strw(iRegI src, memory mem) %{
|
||||
enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
|
||||
Register src_reg = as_Register($src$$reg);
|
||||
loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -2970,14 +2963,14 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strw0(memory mem) %{
|
||||
enc_class aarch64_enc_strw0(memory4 mem) %{
|
||||
loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
%}
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_str(iRegL src, memory mem) %{
|
||||
enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
|
||||
Register src_reg = as_Register($src$$reg);
|
||||
// we sometimes get asked to store the stack pointer into the
|
||||
// current thread -- we cannot do that directly on AArch64
|
||||
@ -2992,14 +2985,14 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_str0(memory mem) %{
|
||||
enc_class aarch64_enc_str0(memory8 mem) %{
|
||||
loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
|
||||
%}
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strs(vRegF src, memory mem) %{
|
||||
enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
|
||||
FloatRegister src_reg = as_FloatRegister($src$$reg);
|
||||
loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
|
||||
@ -3007,7 +3000,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strd(vRegD src, memory mem) %{
|
||||
enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
|
||||
FloatRegister src_reg = as_FloatRegister($src$$reg);
|
||||
loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
|
||||
@ -3015,7 +3008,7 @@ encode %{
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strb0_ordered(memory mem) %{
|
||||
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
|
||||
__ membar(Assembler::StoreStore);
|
||||
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
@ -3217,7 +3210,7 @@ encode %{
|
||||
|
||||
// synchronized read/update encodings
|
||||
|
||||
enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
|
||||
enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
|
||||
Register dst_reg = as_Register($dst$$reg);
|
||||
Register base = as_Register($mem$$base);
|
||||
int index = $mem$$index;
|
||||
@ -3245,7 +3238,7 @@ encode %{
|
||||
}
|
||||
%}
|
||||
|
||||
enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
|
||||
enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
|
||||
Register src_reg = as_Register($src$$reg);
|
||||
Register base = as_Register($mem$$base);
|
||||
int index = $mem$$index;
|
||||
@ -4173,10 +4166,60 @@ operand immIU7()
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Offset for immediate loads and stores
|
||||
// Offset for scaled or unscaled immediate loads and stores
|
||||
operand immIOffset()
|
||||
%{
|
||||
predicate(n->get_int() >= -256 && n->get_int() <= 65520);
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 0));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIOffset1()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 0));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIOffset2()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 1));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIOffset4()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 2));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIOffset8()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 3));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIOffset16()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_int(), 4));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
@ -4194,6 +4237,56 @@ operand immLOffset()
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLoffset1()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_long(), 0));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLoffset2()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_long(), 1));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLoffset4()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_long(), 2));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLoffset8()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_long(), 3));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLoffset16()
|
||||
%{
|
||||
predicate(Address::offset_ok_for_immed(n->get_long(), 4));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// 5 bit signed long integer
|
||||
operand immL5()
|
||||
%{
|
||||
@ -5106,7 +5199,7 @@ operand indIndex(iRegP reg, iRegL lreg)
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffI(iRegP reg, immIOffset off)
|
||||
operand indOffI1(iRegP reg, immIOffset1 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
@ -5120,7 +5213,119 @@ operand indOffI(iRegP reg, immIOffset off)
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL(iRegP reg, immLOffset off)
|
||||
operand indOffI2(iRegP reg, immIOffset2 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffI4(iRegP reg, immIOffset4 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffI8(iRegP reg, immIOffset8 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffI16(iRegP reg, immIOffset16 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL1(iRegP reg, immLoffset1 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL2(iRegP reg, immLoffset2 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL4(iRegP reg, immLoffset4 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL8(iRegP reg, immLoffset8 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
op_cost(0);
|
||||
format %{ "[$reg, $off]" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($reg);
|
||||
index(0xffffffff);
|
||||
scale(0x0);
|
||||
disp($off);
|
||||
%}
|
||||
%}
|
||||
|
||||
operand indOffL16(iRegP reg, immLoffset16 off)
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP reg off);
|
||||
@ -5496,7 +5701,10 @@ operand iRegL2P(iRegL reg) %{
|
||||
interface(REG_INTER)
|
||||
%}
|
||||
|
||||
opclass vmem(indirect, indIndex, indOffI, indOffL, indOffIN, indOffLN);
|
||||
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
|
||||
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
|
||||
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
|
||||
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
|
||||
|
||||
//----------OPERAND CLASSES----------------------------------------------------
|
||||
// Operand Classes are groups of operands that are used as to simplify
|
||||
@ -5508,9 +5716,23 @@ opclass vmem(indirect, indIndex, indOffI, indOffL, indOffIN, indOffLN);
|
||||
// memory is used to define read/write location for load/store
|
||||
// instruction defs. we can turn a memory op into an Address
|
||||
|
||||
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN,
|
||||
indOffLN, indirectX2P, indOffX2P);
|
||||
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);
|
||||
|
||||
opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);
|
||||
|
||||
opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
|
||||
|
||||
opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
|
||||
|
||||
// All of the memory operands. For the pipeline description.
|
||||
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
|
||||
indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
|
||||
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
|
||||
|
||||
|
||||
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
|
||||
// operations. it allows the src to be either an iRegI or a (ConvL2I
|
||||
@ -6212,7 +6434,7 @@ define %{
|
||||
// Load Instructions
|
||||
|
||||
// Load Byte (8 bit signed)
|
||||
instruct loadB(iRegINoSp dst, memory mem)
|
||||
instruct loadB(iRegINoSp dst, memory1 mem)
|
||||
%{
|
||||
match(Set dst (LoadB mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6226,7 +6448,7 @@ instruct loadB(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Byte (8 bit signed) into long
|
||||
instruct loadB2L(iRegLNoSp dst, memory mem)
|
||||
instruct loadB2L(iRegLNoSp dst, memory1 mem)
|
||||
%{
|
||||
match(Set dst (ConvI2L (LoadB mem)));
|
||||
predicate(!needs_acquiring_load(n->in(1)));
|
||||
@ -6240,7 +6462,7 @@ instruct loadB2L(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Byte (8 bit unsigned)
|
||||
instruct loadUB(iRegINoSp dst, memory mem)
|
||||
instruct loadUB(iRegINoSp dst, memory1 mem)
|
||||
%{
|
||||
match(Set dst (LoadUB mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6254,7 +6476,7 @@ instruct loadUB(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Byte (8 bit unsigned) into long
|
||||
instruct loadUB2L(iRegLNoSp dst, memory mem)
|
||||
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
|
||||
%{
|
||||
match(Set dst (ConvI2L (LoadUB mem)));
|
||||
predicate(!needs_acquiring_load(n->in(1)));
|
||||
@ -6268,7 +6490,7 @@ instruct loadUB2L(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Short (16 bit signed)
|
||||
instruct loadS(iRegINoSp dst, memory mem)
|
||||
instruct loadS(iRegINoSp dst, memory2 mem)
|
||||
%{
|
||||
match(Set dst (LoadS mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6282,7 +6504,7 @@ instruct loadS(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Short (16 bit signed) into long
|
||||
instruct loadS2L(iRegLNoSp dst, memory mem)
|
||||
instruct loadS2L(iRegLNoSp dst, memory2 mem)
|
||||
%{
|
||||
match(Set dst (ConvI2L (LoadS mem)));
|
||||
predicate(!needs_acquiring_load(n->in(1)));
|
||||
@ -6296,7 +6518,7 @@ instruct loadS2L(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Char (16 bit unsigned)
|
||||
instruct loadUS(iRegINoSp dst, memory mem)
|
||||
instruct loadUS(iRegINoSp dst, memory2 mem)
|
||||
%{
|
||||
match(Set dst (LoadUS mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6310,7 +6532,7 @@ instruct loadUS(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Short/Char (16 bit unsigned) into long
|
||||
instruct loadUS2L(iRegLNoSp dst, memory mem)
|
||||
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
|
||||
%{
|
||||
match(Set dst (ConvI2L (LoadUS mem)));
|
||||
predicate(!needs_acquiring_load(n->in(1)));
|
||||
@ -6324,7 +6546,7 @@ instruct loadUS2L(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Integer (32 bit signed)
|
||||
instruct loadI(iRegINoSp dst, memory mem)
|
||||
instruct loadI(iRegINoSp dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (LoadI mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6338,7 +6560,7 @@ instruct loadI(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Integer (32 bit signed) into long
|
||||
instruct loadI2L(iRegLNoSp dst, memory mem)
|
||||
instruct loadI2L(iRegLNoSp dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (ConvI2L (LoadI mem)));
|
||||
predicate(!needs_acquiring_load(n->in(1)));
|
||||
@ -6352,7 +6574,7 @@ instruct loadI2L(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Integer (32 bit unsigned) into long
|
||||
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
|
||||
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
|
||||
%{
|
||||
match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
|
||||
predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
|
||||
@ -6366,7 +6588,7 @@ instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
|
||||
%}
|
||||
|
||||
// Load Long (64 bit signed)
|
||||
instruct loadL(iRegLNoSp dst, memory mem)
|
||||
instruct loadL(iRegLNoSp dst, memory8 mem)
|
||||
%{
|
||||
match(Set dst (LoadL mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6380,7 +6602,7 @@ instruct loadL(iRegLNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Range
|
||||
instruct loadRange(iRegINoSp dst, memory mem)
|
||||
instruct loadRange(iRegINoSp dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (LoadRange mem));
|
||||
|
||||
@ -6393,7 +6615,7 @@ instruct loadRange(iRegINoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct loadP(iRegPNoSp dst, memory mem)
|
||||
instruct loadP(iRegPNoSp dst, memory8 mem)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
|
||||
@ -6407,10 +6629,10 @@ instruct loadP(iRegPNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Compressed Pointer
|
||||
instruct loadN(iRegNNoSp dst, memory mem)
|
||||
instruct loadN(iRegNNoSp dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (LoadN mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);
|
||||
|
||||
ins_cost(4 * INSN_COST);
|
||||
format %{ "ldrw $dst, $mem\t# compressed ptr" %}
|
||||
@ -6421,7 +6643,7 @@ instruct loadN(iRegNNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Klass Pointer
|
||||
instruct loadKlass(iRegPNoSp dst, memory mem)
|
||||
instruct loadKlass(iRegPNoSp dst, memory8 mem)
|
||||
%{
|
||||
match(Set dst (LoadKlass mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6435,7 +6657,7 @@ instruct loadKlass(iRegPNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Narrow Klass Pointer
|
||||
instruct loadNKlass(iRegNNoSp dst, memory mem)
|
||||
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (LoadNKlass mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6449,7 +6671,7 @@ instruct loadNKlass(iRegNNoSp dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Float
|
||||
instruct loadF(vRegF dst, memory mem)
|
||||
instruct loadF(vRegF dst, memory4 mem)
|
||||
%{
|
||||
match(Set dst (LoadF mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6463,7 +6685,7 @@ instruct loadF(vRegF dst, memory mem)
|
||||
%}
|
||||
|
||||
// Load Double
|
||||
instruct loadD(vRegD dst, memory mem)
|
||||
instruct loadD(vRegD dst, memory8 mem)
|
||||
%{
|
||||
match(Set dst (LoadD mem));
|
||||
predicate(!needs_acquiring_load(n));
|
||||
@ -6666,38 +6888,8 @@ instruct loadConD(vRegD dst, immD con) %{
|
||||
|
||||
// Store Instructions
|
||||
|
||||
// Store CMS card-mark Immediate
|
||||
instruct storeimmCM0(immI0 zero, memory mem)
|
||||
%{
|
||||
match(Set mem (StoreCM mem zero));
|
||||
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "storestore (elided)\n\t"
|
||||
"strb zr, $mem\t# byte" %}
|
||||
|
||||
ins_encode(aarch64_enc_strb0(mem));
|
||||
|
||||
ins_pipe(istore_mem);
|
||||
%}
|
||||
|
||||
// Store CMS card-mark Immediate with intervening StoreStore
|
||||
// needed when using CMS with no conditional card marking
|
||||
instruct storeimmCM0_ordered(immI0 zero, memory mem)
|
||||
%{
|
||||
match(Set mem (StoreCM mem zero));
|
||||
|
||||
ins_cost(INSN_COST * 2);
|
||||
format %{ "storestore\n\t"
|
||||
"dmb ishst"
|
||||
"\n\tstrb zr, $mem\t# byte" %}
|
||||
|
||||
ins_encode(aarch64_enc_strb0_ordered(mem));
|
||||
|
||||
ins_pipe(istore_mem);
|
||||
%}
|
||||
|
||||
// Store Byte
|
||||
instruct storeB(iRegIorL2I src, memory mem)
|
||||
instruct storeB(iRegIorL2I src, memory1 mem)
|
||||
%{
|
||||
match(Set mem (StoreB mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6711,7 +6903,7 @@ instruct storeB(iRegIorL2I src, memory mem)
|
||||
%}
|
||||
|
||||
|
||||
instruct storeimmB0(immI0 zero, memory mem)
|
||||
instruct storeimmB0(immI0 zero, memory1 mem)
|
||||
%{
|
||||
match(Set mem (StoreB mem zero));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6725,7 +6917,7 @@ instruct storeimmB0(immI0 zero, memory mem)
|
||||
%}
|
||||
|
||||
// Store Char/Short
|
||||
instruct storeC(iRegIorL2I src, memory mem)
|
||||
instruct storeC(iRegIorL2I src, memory2 mem)
|
||||
%{
|
||||
match(Set mem (StoreC mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6738,7 +6930,7 @@ instruct storeC(iRegIorL2I src, memory mem)
|
||||
ins_pipe(istore_reg_mem);
|
||||
%}
|
||||
|
||||
instruct storeimmC0(immI0 zero, memory mem)
|
||||
instruct storeimmC0(immI0 zero, memory2 mem)
|
||||
%{
|
||||
match(Set mem (StoreC mem zero));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6753,7 +6945,7 @@ instruct storeimmC0(immI0 zero, memory mem)
|
||||
|
||||
// Store Integer
|
||||
|
||||
instruct storeI(iRegIorL2I src, memory mem)
|
||||
instruct storeI(iRegIorL2I src, memory4 mem)
|
||||
%{
|
||||
match(Set mem(StoreI mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6766,7 +6958,7 @@ instruct storeI(iRegIorL2I src, memory mem)
|
||||
ins_pipe(istore_reg_mem);
|
||||
%}
|
||||
|
||||
instruct storeimmI0(immI0 zero, memory mem)
|
||||
instruct storeimmI0(immI0 zero, memory4 mem)
|
||||
%{
|
||||
match(Set mem(StoreI mem zero));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6780,7 +6972,7 @@ instruct storeimmI0(immI0 zero, memory mem)
|
||||
%}
|
||||
|
||||
// Store Long (64 bit signed)
|
||||
instruct storeL(iRegL src, memory mem)
|
||||
instruct storeL(iRegL src, memory8 mem)
|
||||
%{
|
||||
match(Set mem (StoreL mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6794,7 +6986,7 @@ instruct storeL(iRegL src, memory mem)
|
||||
%}
|
||||
|
||||
// Store Long (64 bit signed)
|
||||
instruct storeimmL0(immL0 zero, memory mem)
|
||||
instruct storeimmL0(immL0 zero, memory8 mem)
|
||||
%{
|
||||
match(Set mem (StoreL mem zero));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6808,7 +7000,7 @@ instruct storeimmL0(immL0 zero, memory mem)
|
||||
%}
|
||||
|
||||
// Store Pointer
|
||||
instruct storeP(iRegP src, memory mem)
|
||||
instruct storeP(iRegP src, memory8 mem)
|
||||
%{
|
||||
match(Set mem (StoreP mem src));
|
||||
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
|
||||
@ -6822,7 +7014,7 @@ instruct storeP(iRegP src, memory mem)
|
||||
%}
|
||||
|
||||
// Store Pointer
|
||||
instruct storeimmP0(immP0 zero, memory mem)
|
||||
instruct storeimmP0(immP0 zero, memory8 mem)
|
||||
%{
|
||||
match(Set mem (StoreP mem zero));
|
||||
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
|
||||
@ -6836,10 +7028,10 @@ instruct storeimmP0(immP0 zero, memory mem)
|
||||
%}
|
||||
|
||||
// Store Compressed Pointer
|
||||
instruct storeN(iRegN src, memory mem)
|
||||
instruct storeN(iRegN src, memory4 mem)
|
||||
%{
|
||||
match(Set mem (StoreN mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
|
||||
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "strw $src, $mem\t# compressed ptr" %}
|
||||
@ -6849,10 +7041,10 @@ instruct storeN(iRegN src, memory mem)
|
||||
ins_pipe(istore_reg_mem);
|
||||
%}
|
||||
|
||||
instruct storeImmN0(immN0 zero, memory mem)
|
||||
instruct storeImmN0(immN0 zero, memory4 mem)
|
||||
%{
|
||||
match(Set mem (StoreN mem zero));
|
||||
predicate(!needs_releasing_store(n));
|
||||
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
|
||||
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "strw zr, $mem\t# compressed ptr" %}
|
||||
@ -6863,7 +7055,7 @@ instruct storeImmN0(immN0 zero, memory mem)
|
||||
%}
|
||||
|
||||
// Store Float
|
||||
instruct storeF(vRegF src, memory mem)
|
||||
instruct storeF(vRegF src, memory4 mem)
|
||||
%{
|
||||
match(Set mem (StoreF mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6880,7 +7072,7 @@ instruct storeF(vRegF src, memory mem)
|
||||
// implement storeImmF0 and storeFImmPacked
|
||||
|
||||
// Store Double
|
||||
instruct storeD(vRegD src, memory mem)
|
||||
instruct storeD(vRegD src, memory8 mem)
|
||||
%{
|
||||
match(Set mem (StoreD mem src));
|
||||
predicate(!needs_releasing_store(n));
|
||||
@ -6894,7 +7086,7 @@ instruct storeD(vRegD src, memory mem)
|
||||
%}
|
||||
|
||||
// Store Compressed Klass Pointer
|
||||
instruct storeNKlass(iRegN src, memory mem)
|
||||
instruct storeNKlass(iRegN src, memory4 mem)
|
||||
%{
|
||||
predicate(!needs_releasing_store(n));
|
||||
match(Set mem (StoreNKlass mem src));
|
||||
@ -6913,7 +7105,7 @@ instruct storeNKlass(iRegN src, memory mem)
|
||||
// prefetch instructions
|
||||
// Must be safe to execute with invalid address (cannot fault).
|
||||
|
||||
instruct prefetchalloc( memory mem ) %{
|
||||
instruct prefetchalloc( memory8 mem ) %{
|
||||
match(PrefetchAllocation mem);
|
||||
|
||||
ins_cost(INSN_COST);
|
||||
@ -7086,6 +7278,7 @@ instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
|
||||
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
|
||||
%{
|
||||
match(Set dst (LoadN mem));
|
||||
predicate(n->as_Load()->barrier_data() == 0);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "ldarw $dst, $mem\t# compressed ptr" %}
|
||||
@ -7253,6 +7446,7 @@ instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
|
||||
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
|
||||
%{
|
||||
match(Set mem (StoreN mem src));
|
||||
predicate(n->as_Store()->barrier_data() == 0);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "stlrw $src, $mem\t# compressed ptr" %}
|
||||
@ -7265,6 +7459,7 @@ instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
|
||||
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
|
||||
%{
|
||||
match(Set mem (StoreN mem zero));
|
||||
predicate(n->as_Store()->barrier_data() == 0);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "stlrw zr, $mem\t# compressed ptr" %}
|
||||
@ -7482,7 +7677,7 @@ instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
|
||||
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
|
||||
match(Set dst (PopCountI (LoadI mem)));
|
||||
effect(TEMP tmp);
|
||||
ins_cost(INSN_COST * 13);
|
||||
@ -7523,7 +7718,7 @@ instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
|
||||
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
|
||||
match(Set dst (PopCountL (LoadL mem)));
|
||||
effect(TEMP tmp);
|
||||
ins_cost(INSN_COST * 13);
|
||||
@ -8061,6 +8256,7 @@ instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval
|
||||
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
|
||||
|
||||
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
effect(KILL cr);
|
||||
@ -8175,7 +8371,7 @@ instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP new
|
||||
|
||||
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
|
||||
|
||||
predicate(needs_acquiring_load_exclusive(n));
|
||||
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
|
||||
@ -8280,6 +8476,7 @@ instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL ne
|
||||
// This pattern is generated automatically from cas.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
effect(TEMP_DEF res, KILL cr);
|
||||
@ -8389,7 +8586,7 @@ instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL
|
||||
// This pattern is generated automatically from cas.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
|
||||
predicate(needs_acquiring_load_exclusive(n));
|
||||
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
effect(TEMP_DEF res, KILL cr);
|
||||
@ -8501,6 +8698,7 @@ instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL ne
|
||||
// This pattern is generated automatically from cas.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
effect(KILL cr);
|
||||
@ -8620,7 +8818,7 @@ instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL
|
||||
// This pattern is generated automatically from cas.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
|
||||
predicate(needs_acquiring_load_exclusive(n));
|
||||
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
effect(KILL cr);
|
||||
@ -8681,6 +8879,7 @@ instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
|
||||
%}
|
||||
|
||||
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set prev (GetAndSetN mem newv));
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
|
||||
@ -8724,7 +8923,7 @@ instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
|
||||
%}
|
||||
|
||||
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
|
||||
predicate(needs_acquiring_load_exclusive(n));
|
||||
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
|
||||
match(Set prev (GetAndSetN mem newv));
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
|
||||
@ -16672,7 +16871,7 @@ instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct compressBitsI_memcon(iRegINoSp dst, memory mem, immI mask,
|
||||
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
|
||||
vRegF tdst, vRegF tsrc, vRegF tmask) %{
|
||||
match(Set dst (CompressBits (LoadI mem) mask));
|
||||
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
|
||||
@ -16709,7 +16908,7 @@ instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct compressBitsL_memcon(iRegLNoSp dst, memory mem, immL mask,
|
||||
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
|
||||
vRegF tdst, vRegF tsrc, vRegF tmask) %{
|
||||
match(Set dst (CompressBits (LoadL mem) mask));
|
||||
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
|
||||
@ -16746,7 +16945,7 @@ instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct expandBitsI_memcon(iRegINoSp dst, memory mem, immI mask,
|
||||
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
|
||||
vRegF tdst, vRegF tsrc, vRegF tmask) %{
|
||||
match(Set dst (ExpandBits (LoadI mem) mask));
|
||||
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
|
||||
@ -16784,7 +16983,7 @@ instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
|
||||
%}
|
||||
|
||||
|
||||
instruct expandBitsL_memcon(iRegINoSp dst, memory mem, immL mask,
|
||||
instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask,
|
||||
vRegF tdst, vRegF tsrc, vRegF tmask) %{
|
||||
match(Set dst (ExpandBits (LoadL mem) mask));
|
||||
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
|
||||
|
||||
@ -345,7 +345,7 @@ source %{
|
||||
// ------------------------------ Vector load/store ----------------------------
|
||||
|
||||
// Load Vector (16 bits)
|
||||
instruct loadV2(vReg dst, vmem mem) %{
|
||||
instruct loadV2(vReg dst, vmem2 mem) %{
|
||||
predicate(n->as_LoadVector()->memory_size() == 2);
|
||||
match(Set dst (LoadVector mem));
|
||||
format %{ "loadV2 $dst, $mem\t# vector (16 bits)" %}
|
||||
@ -354,7 +354,7 @@ instruct loadV2(vReg dst, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Store Vector (16 bits)
|
||||
instruct storeV2(vReg src, vmem mem) %{
|
||||
instruct storeV2(vReg src, vmem2 mem) %{
|
||||
predicate(n->as_StoreVector()->memory_size() == 2);
|
||||
match(Set mem (StoreVector mem src));
|
||||
format %{ "storeV2 $mem, $src\t# vector (16 bits)" %}
|
||||
@ -363,7 +363,7 @@ instruct storeV2(vReg src, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Load Vector (32 bits)
|
||||
instruct loadV4(vReg dst, vmem mem) %{
|
||||
instruct loadV4(vReg dst, vmem4 mem) %{
|
||||
predicate(n->as_LoadVector()->memory_size() == 4);
|
||||
match(Set dst (LoadVector mem));
|
||||
format %{ "loadV4 $dst, $mem\t# vector (32 bits)" %}
|
||||
@ -372,7 +372,7 @@ instruct loadV4(vReg dst, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Store Vector (32 bits)
|
||||
instruct storeV4(vReg src, vmem mem) %{
|
||||
instruct storeV4(vReg src, vmem4 mem) %{
|
||||
predicate(n->as_StoreVector()->memory_size() == 4);
|
||||
match(Set mem (StoreVector mem src));
|
||||
format %{ "storeV4 $mem, $src\t# vector (32 bits)" %}
|
||||
@ -381,7 +381,7 @@ instruct storeV4(vReg src, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Load Vector (64 bits)
|
||||
instruct loadV8(vReg dst, vmem mem) %{
|
||||
instruct loadV8(vReg dst, vmem8 mem) %{
|
||||
predicate(n->as_LoadVector()->memory_size() == 8);
|
||||
match(Set dst (LoadVector mem));
|
||||
format %{ "loadV8 $dst, $mem\t# vector (64 bits)" %}
|
||||
@ -390,7 +390,7 @@ instruct loadV8(vReg dst, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Store Vector (64 bits)
|
||||
instruct storeV8(vReg src, vmem mem) %{
|
||||
instruct storeV8(vReg src, vmem8 mem) %{
|
||||
predicate(n->as_StoreVector()->memory_size() == 8);
|
||||
match(Set mem (StoreVector mem src));
|
||||
format %{ "storeV8 $mem, $src\t# vector (64 bits)" %}
|
||||
@ -399,7 +399,7 @@ instruct storeV8(vReg src, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Load Vector (128 bits)
|
||||
instruct loadV16(vReg dst, vmem mem) %{
|
||||
instruct loadV16(vReg dst, vmem16 mem) %{
|
||||
predicate(n->as_LoadVector()->memory_size() == 16);
|
||||
match(Set dst (LoadVector mem));
|
||||
format %{ "loadV16 $dst, $mem\t# vector (128 bits)" %}
|
||||
@ -408,7 +408,7 @@ instruct loadV16(vReg dst, vmem mem) %{
|
||||
%}
|
||||
|
||||
// Store Vector (128 bits)
|
||||
instruct storeV16(vReg src, vmem mem) %{
|
||||
instruct storeV16(vReg src, vmem16 mem) %{
|
||||
predicate(n->as_StoreVector()->memory_size() == 16);
|
||||
match(Set mem (StoreVector mem src));
|
||||
format %{ "storeV16 $mem, $src\t# vector (128 bits)" %}
|
||||
|
||||
@ -338,7 +338,7 @@ dnl VECTOR_LOAD_STORE($1, $2, $3, $4, $5 )
|
||||
dnl VECTOR_LOAD_STORE(type, nbytes, arg_name, nbits, size)
|
||||
define(`VECTOR_LOAD_STORE', `
|
||||
// ifelse(load, $1, Load, Store) Vector ($4 bits)
|
||||
instruct $1V$2(vReg $3, vmem mem) %{
|
||||
instruct $1V$2(vReg $3, vmem$2 mem) %{
|
||||
predicate(`n->as_'ifelse(load, $1, Load, Store)Vector()->memory_size() == $2);
|
||||
match(Set ifelse(load, $1, dst (LoadVector mem), mem (StoreVector mem src)));
|
||||
format %{ "$1V$2 ifelse(load, $1, `$dst, $mem', `$mem, $src')\t# vector ($4 bits)" %}
|
||||
|
||||
@ -34,7 +34,7 @@ define(access, `
|
||||
define(load,`
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_$2($1 dst, memory mem) %{dnl
|
||||
enc_class aarch64_enc_$2($1 dst, memory$5 mem) %{dnl
|
||||
access(dst,$2,$3,$4,$5)')dnl
|
||||
load(iRegI,ldrsbw,,,1)
|
||||
load(iRegI,ldrsb,,,1)
|
||||
@ -53,12 +53,12 @@ load(vRegD,ldrd,Float,,8)
|
||||
define(STORE,`
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_$2($1 src, memory mem) %{dnl
|
||||
enc_class aarch64_enc_$2($1 src, memory$5 mem) %{dnl
|
||||
access(src,$2,$3,$4,$5)')dnl
|
||||
define(STORE0,`
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_$2`'0(memory mem) %{
|
||||
enc_class aarch64_enc_$2`'0(memory$4 mem) %{
|
||||
choose(masm,zr,$2,$mem->opcode(),
|
||||
as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl
|
||||
STORE(iRegI,strb,,,1)
|
||||
@ -82,7 +82,7 @@ STORE(vRegD,strd,Float,,8)
|
||||
|
||||
// This encoding class is generated automatically from ad_encode.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
enc_class aarch64_enc_strb0_ordered(memory mem) %{
|
||||
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
|
||||
__ membar(Assembler::StoreStore);
|
||||
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
|
||||
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -57,7 +57,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
|
||||
__ mov_metadata(rscratch1, m);
|
||||
ce->store_parameter(rscratch1, 1);
|
||||
ce->store_parameter(_bci, 0);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
__ b(_continuation);
|
||||
@ -66,7 +66,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
|
||||
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
__ far_call(RuntimeAddress(a));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
@ -79,13 +79,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
} else {
|
||||
__ mov(rscratch1, _index->as_jint());
|
||||
}
|
||||
Runtime1::StubID stub_id;
|
||||
C1StubId stub_id;
|
||||
if (_throw_index_out_of_bounds_exception) {
|
||||
stub_id = Runtime1::throw_index_exception_id;
|
||||
stub_id = C1StubId::throw_index_exception_id;
|
||||
} else {
|
||||
assert(_array != LIR_Opr::nullOpr(), "sanity");
|
||||
__ mov(rscratch2, _array->as_pointer_register());
|
||||
stub_id = Runtime1::throw_range_check_failed_id;
|
||||
stub_id = C1StubId::throw_range_check_failed_id;
|
||||
}
|
||||
__ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
|
||||
__ blr(lr);
|
||||
@ -100,7 +100,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
|
||||
|
||||
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
__ far_call(RuntimeAddress(a));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
@ -112,7 +112,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
}
|
||||
__ bind(_entry);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
#ifdef ASSERT
|
||||
@ -124,14 +124,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
// Implementation of NewInstanceStub
|
||||
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
|
||||
_result = result;
|
||||
_klass = klass;
|
||||
_klass_reg = klass_reg;
|
||||
_info = new CodeEmitInfo(info);
|
||||
assert(stub_id == Runtime1::new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_init_check_id,
|
||||
assert(stub_id == C1StubId::new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_init_check_id,
|
||||
"need new_instance id");
|
||||
_stub_id = stub_id;
|
||||
}
|
||||
@ -167,7 +167,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
assert(_length->as_register() == r19, "length must in r19,");
|
||||
assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
assert(_result->as_register() == r0, "result must in r0");
|
||||
@ -190,7 +190,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
assert(_length->as_register() == r19, "length must in r19,");
|
||||
assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
assert(_result->as_register() == r0, "result must in r0");
|
||||
@ -202,11 +202,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
ce->store_parameter(_obj_reg->as_register(), 1);
|
||||
ce->store_parameter(_lock_reg->as_register(), 0);
|
||||
Runtime1::StubID enter_id;
|
||||
C1StubId enter_id;
|
||||
if (ce->compilation()->has_fpu_code()) {
|
||||
enter_id = Runtime1::monitorenter_id;
|
||||
enter_id = C1StubId::monitorenter_id;
|
||||
} else {
|
||||
enter_id = Runtime1::monitorenter_nofpu_id;
|
||||
enter_id = C1StubId::monitorenter_nofpu_id;
|
||||
}
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
@ -223,11 +223,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
|
||||
}
|
||||
ce->store_parameter(_lock_reg->as_register(), 0);
|
||||
// note: non-blocking leaf routine => no call info needed
|
||||
Runtime1::StubID exit_id;
|
||||
C1StubId exit_id;
|
||||
if (ce->compilation()->has_fpu_code()) {
|
||||
exit_id = Runtime1::monitorexit_id;
|
||||
exit_id = C1StubId::monitorexit_id;
|
||||
} else {
|
||||
exit_id = Runtime1::monitorexit_nofpu_id;
|
||||
exit_id = C1StubId::monitorexit_nofpu_id;
|
||||
}
|
||||
__ adr(lr, _continuation);
|
||||
__ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
|
||||
@ -255,7 +255,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
ce->store_parameter(_trap_request, 0);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
DEBUG_ONLY(__ should_not_reach_here());
|
||||
}
|
||||
@ -265,9 +265,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
address a;
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
|
||||
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
} else {
|
||||
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
|
||||
a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
}
|
||||
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
|
||||
@ -321,19 +321,19 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
|
||||
|
||||
switch (patching_id(info)) {
|
||||
case PatchingStub::access_field_id:
|
||||
target = Runtime1::entry_for(Runtime1::access_field_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::access_field_patching_id);
|
||||
reloc_type = relocInfo::section_word_type;
|
||||
break;
|
||||
case PatchingStub::load_klass_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
|
||||
reloc_type = relocInfo::metadata_type;
|
||||
break;
|
||||
case PatchingStub::load_mirror_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
|
||||
reloc_type = relocInfo::oop_type;
|
||||
break;
|
||||
case PatchingStub::load_appendix_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
|
||||
reloc_type = relocInfo::oop_type;
|
||||
break;
|
||||
default: ShouldNotReachHere();
|
||||
@ -375,7 +375,7 @@ int LIR_Assembler::emit_exception_handler() {
|
||||
__ verify_not_null_oop(r0);
|
||||
|
||||
// search an exception handler (r0: exception oop, r3: throwing pc)
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
|
||||
__ should_not_reach_here();
|
||||
guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
|
||||
__ end_a_stub();
|
||||
@ -432,7 +432,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
||||
// remove the activation and dispatch to the unwind handler
|
||||
__ block_comment("remove_frame and dispatch to the unwind handler");
|
||||
__ remove_frame(initial_frame_size_in_bytes());
|
||||
__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
|
||||
__ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
|
||||
|
||||
// Emit the slow path assembly
|
||||
if (stub != nullptr) {
|
||||
@ -875,19 +875,19 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
|
||||
|
||||
switch (patching_id(info)) {
|
||||
case PatchingStub::access_field_id:
|
||||
target = Runtime1::entry_for(Runtime1::access_field_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::access_field_patching_id);
|
||||
reloc_type = relocInfo::section_word_type;
|
||||
break;
|
||||
case PatchingStub::load_klass_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
|
||||
reloc_type = relocInfo::metadata_type;
|
||||
break;
|
||||
case PatchingStub::load_mirror_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
|
||||
reloc_type = relocInfo::oop_type;
|
||||
break;
|
||||
case PatchingStub::load_appendix_id:
|
||||
target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
|
||||
target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
|
||||
reloc_type = relocInfo::oop_type;
|
||||
break;
|
||||
default: ShouldNotReachHere();
|
||||
@ -1168,8 +1168,8 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
|
||||
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
if (op->init_check()) {
|
||||
__ ldrb(rscratch1, Address(op->klass()->as_register(),
|
||||
InstanceKlass::init_state_offset()));
|
||||
__ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
|
||||
__ ldarb(rscratch1, rscratch1);
|
||||
__ cmpw(rscratch1, InstanceKlass::fully_initialized);
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ br(Assembler::NE, *op->stub()->entry());
|
||||
@ -1356,7 +1356,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ br(Assembler::EQ, *success_target);
|
||||
|
||||
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
|
||||
__ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
|
||||
// result is a boolean
|
||||
__ cbzw(klass_RInfo, *failure_target);
|
||||
@ -1367,7 +1367,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
|
||||
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
|
||||
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
|
||||
__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
|
||||
// result is a boolean
|
||||
__ cbz(k_RInfo, *failure_target);
|
||||
@ -1446,7 +1446,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
|
||||
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
|
||||
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
|
||||
__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
|
||||
// result is a boolean
|
||||
__ cbzw(k_RInfo, *failure_target);
|
||||
@ -2035,7 +2035,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
|
||||
// exception object is not added to oop map by LinearScan
|
||||
// (LinearScan assumes that no oops are in fixed registers)
|
||||
info->add_register_oop(exceptionOop);
|
||||
Runtime1::StubID unwind_id;
|
||||
C1StubId unwind_id;
|
||||
|
||||
// get current pc information
|
||||
// pc is only needed if the method has an exception handler, the unwind code does not need it.
|
||||
@ -2054,9 +2054,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
|
||||
__ verify_not_null_oop(r0);
|
||||
// search an exception handler (r0: exception oop, r3: throwing pc)
|
||||
if (compilation()->has_fpu_code()) {
|
||||
unwind_id = Runtime1::handle_exception_id;
|
||||
unwind_id = C1StubId::handle_exception_id;
|
||||
} else {
|
||||
unwind_id = Runtime1::handle_exception_nofpu_id;
|
||||
unwind_id = C1StubId::handle_exception_nofpu_id;
|
||||
}
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
|
||||
|
||||
@ -2337,7 +2337,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
|
||||
|
||||
__ PUSH(src, dst);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
|
||||
__ POP(src, dst);
|
||||
|
||||
__ cbnz(src, cont);
|
||||
|
||||
@ -1246,7 +1246,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
|
||||
args->append(rank);
|
||||
args->append(varargs);
|
||||
LIR_Opr reg = result_register_for(x->type());
|
||||
__ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
|
||||
__ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
|
||||
LIR_OprFact::illegalOpr,
|
||||
reg, args, info);
|
||||
|
||||
@ -1277,14 +1277,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
CodeStub* stub;
|
||||
if (x->is_incompatible_class_change_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
|
||||
} else if (x->is_invokespecial_receiver_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new DeoptimizeStub(info_for_exception,
|
||||
Deoptimization::Reason_class_check,
|
||||
Deoptimization::Action_none);
|
||||
} else {
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
|
||||
}
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
|
||||
|
||||
@ -75,8 +75,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(hdr, obj);
|
||||
ldrw(hdr, Address(hdr, Klass::access_flags_offset()));
|
||||
tstw(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(hdr, Address(hdr, Klass::misc_flags_offset()));
|
||||
tst(hdr, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
@ -267,7 +267,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
|
||||
|
||||
if (CURRENT_ENV->dtrace_alloc_probes()) {
|
||||
assert(obj == r0, "must be");
|
||||
far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
|
||||
far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
|
||||
}
|
||||
|
||||
verify_oop(obj);
|
||||
@ -308,7 +308,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
|
||||
|
||||
if (CURRENT_ENV->dtrace_alloc_probes()) {
|
||||
assert(obj == r0, "must be");
|
||||
far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
|
||||
far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
|
||||
}
|
||||
|
||||
verify_oop(obj);
|
||||
|
||||
@ -100,10 +100,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
|
||||
if (frame_size() == no_frame_size) {
|
||||
leave();
|
||||
far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
|
||||
} else if (_stub_id == Runtime1::forward_exception_id) {
|
||||
} else if (_stub_id == (int)C1StubId::forward_exception_id) {
|
||||
should_not_reach_here();
|
||||
} else {
|
||||
far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
|
||||
far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
|
||||
}
|
||||
bind(L);
|
||||
}
|
||||
@ -358,7 +358,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
|
||||
OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
|
||||
__ block_comment("generate_handle_exception");
|
||||
|
||||
// incoming parameters
|
||||
@ -370,7 +370,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
OopMap* oop_map = nullptr;
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
// We're handling an exception in the context of a compiled frame.
|
||||
// The registers have been saved in the standard places. Perform
|
||||
// an exception lookup in the caller and dispatch to the handler
|
||||
@ -390,12 +390,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
|
||||
__ str(zr, Address(rthread, JavaThread::vm_result_offset()));
|
||||
__ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
|
||||
break;
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
// At this point all registers MAY be live.
|
||||
oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
|
||||
oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
|
||||
break;
|
||||
case handle_exception_from_callee_id: {
|
||||
case C1StubId::handle_exception_from_callee_id: {
|
||||
// At this point all registers except exception oop (r0) and
|
||||
// exception pc (lr) are dead.
|
||||
const int frame_size = 2 /*fp, return address*/;
|
||||
@ -453,13 +453,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
|
||||
__ str(r0, Address(rfp, 1*BytesPerWord));
|
||||
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
// Restore the registers that were saved at the beginning.
|
||||
restore_live_registers(sasm, id != handle_exception_nofpu_id);
|
||||
restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
|
||||
break;
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
@ -611,7 +611,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
|
||||
const Register exception_oop = r0;
|
||||
const Register exception_pc = r3;
|
||||
@ -628,7 +628,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
OopMap* oop_map = nullptr;
|
||||
switch (id) {
|
||||
{
|
||||
case forward_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
{
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
__ leave();
|
||||
@ -636,31 +636,31 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_div0_exception_id:
|
||||
case C1StubId::throw_div0_exception_id:
|
||||
{ StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_null_pointer_exception_id:
|
||||
case C1StubId::throw_null_pointer_exception_id:
|
||||
{ StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case new_instance_id:
|
||||
case fast_new_instance_id:
|
||||
case fast_new_instance_init_check_id:
|
||||
case C1StubId::new_instance_id:
|
||||
case C1StubId::fast_new_instance_id:
|
||||
case C1StubId::fast_new_instance_init_check_id:
|
||||
{
|
||||
Register klass = r3; // Incoming
|
||||
Register obj = r0; // Result
|
||||
|
||||
if (id == new_instance_id) {
|
||||
if (id == C1StubId::new_instance_id) {
|
||||
__ set_info("new_instance", dont_gc_arguments);
|
||||
} else if (id == fast_new_instance_id) {
|
||||
} else if (id == C1StubId::fast_new_instance_id) {
|
||||
__ set_info("fast new_instance", dont_gc_arguments);
|
||||
} else {
|
||||
assert(id == fast_new_instance_init_check_id, "bad StubID");
|
||||
assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
|
||||
__ set_info("fast new_instance init check", dont_gc_arguments);
|
||||
}
|
||||
|
||||
@ -679,7 +679,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
|
||||
break;
|
||||
|
||||
case counter_overflow_id:
|
||||
case C1StubId::counter_overflow_id:
|
||||
{
|
||||
Register bci = r0, method = r1;
|
||||
__ enter();
|
||||
@ -697,14 +697,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case new_type_array_id:
|
||||
case new_object_array_id:
|
||||
case C1StubId::new_type_array_id:
|
||||
case C1StubId::new_object_array_id:
|
||||
{
|
||||
Register length = r19; // Incoming
|
||||
Register klass = r3; // Incoming
|
||||
Register obj = r0; // Result
|
||||
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
__ set_info("new_type_array", dont_gc_arguments);
|
||||
} else {
|
||||
__ set_info("new_object_array", dont_gc_arguments);
|
||||
@ -717,7 +717,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
Register t0 = obj;
|
||||
__ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
|
||||
__ asrw(t0, t0, Klass::_lh_array_tag_shift);
|
||||
int tag = ((id == new_type_array_id)
|
||||
int tag = ((id == C1StubId::new_type_array_id)
|
||||
? Klass::_lh_array_tag_type_value
|
||||
: Klass::_lh_array_tag_obj_value);
|
||||
__ mov(rscratch1, tag);
|
||||
@ -732,7 +732,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
__ enter();
|
||||
OopMap* map = save_live_registers(sasm);
|
||||
int call_offset;
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
|
||||
} else {
|
||||
call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
|
||||
@ -750,7 +750,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case new_multi_array_id:
|
||||
case C1StubId::new_multi_array_id:
|
||||
{ StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
|
||||
// r0,: klass
|
||||
// r19,: rank
|
||||
@ -770,7 +770,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case register_finalizer_id:
|
||||
case C1StubId::register_finalizer_id:
|
||||
{
|
||||
__ set_info("register_finalizer", dont_gc_arguments);
|
||||
|
||||
@ -783,8 +783,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
Label register_finalizer;
|
||||
Register t = r5;
|
||||
__ load_klass(t, r0);
|
||||
__ ldrw(t, Address(t, Klass::access_flags_offset()));
|
||||
__ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
|
||||
__ ldrb(t, Address(t, Klass::misc_flags_offset()));
|
||||
__ tbnz(t, exact_log2(KlassFlags::_misc_has_finalizer), register_finalizer);
|
||||
__ ret(lr);
|
||||
|
||||
__ bind(register_finalizer);
|
||||
@ -802,19 +802,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_class_cast_exception_id:
|
||||
case C1StubId::throw_class_cast_exception_id:
|
||||
{ StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_incompatible_class_change_error_id:
|
||||
case C1StubId::throw_incompatible_class_change_error_id:
|
||||
{ StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case slow_subtype_check_id:
|
||||
case C1StubId::slow_subtype_check_id:
|
||||
{
|
||||
// Typical calling sequence:
|
||||
// __ push(klass_RInfo); // object klass or other subclass
|
||||
@ -857,10 +857,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorenter_nofpu_id:
|
||||
case C1StubId::monitorenter_nofpu_id:
|
||||
save_fpu_registers = false;
|
||||
// fall through
|
||||
case monitorenter_id:
|
||||
case C1StubId::monitorenter_id:
|
||||
{
|
||||
StubFrame f(sasm, "monitorenter", dont_gc_arguments);
|
||||
OopMap* map = save_live_registers(sasm, save_fpu_registers);
|
||||
@ -878,10 +878,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorexit_nofpu_id:
|
||||
case C1StubId::monitorexit_nofpu_id:
|
||||
save_fpu_registers = false;
|
||||
// fall through
|
||||
case monitorexit_id:
|
||||
case C1StubId::monitorexit_id:
|
||||
{
|
||||
StubFrame f(sasm, "monitorexit", dont_gc_arguments);
|
||||
OopMap* map = save_live_registers(sasm, save_fpu_registers);
|
||||
@ -901,7 +901,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case deoptimize_id:
|
||||
case C1StubId::deoptimize_id:
|
||||
{
|
||||
StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
|
||||
OopMap* oop_map = save_live_registers(sasm);
|
||||
@ -918,13 +918,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_range_check_failed_id:
|
||||
case C1StubId::throw_range_check_failed_id:
|
||||
{ StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case unwind_exception_id:
|
||||
case C1StubId::unwind_exception_id:
|
||||
{ __ set_info("unwind_exception", dont_gc_arguments);
|
||||
// note: no stubframe since we are about to leave the current
|
||||
// activation and we are calling a leaf VM function only.
|
||||
@ -932,54 +932,54 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case access_field_patching_id:
|
||||
case C1StubId::access_field_patching_id:
|
||||
{ StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
|
||||
// we should set up register map
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_klass_patching_id:
|
||||
case C1StubId::load_klass_patching_id:
|
||||
{ StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
|
||||
// we should set up register map
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_mirror_patching_id:
|
||||
case C1StubId::load_mirror_patching_id:
|
||||
{ StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
|
||||
// we should set up register map
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_appendix_patching_id:
|
||||
case C1StubId::load_appendix_patching_id:
|
||||
{ StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
|
||||
// we should set up register map
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
{ StubFrame f(sasm, "handle_exception", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
{ StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_index_exception_id:
|
||||
case C1StubId::throw_index_exception_id:
|
||||
{ StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_array_store_exception_id:
|
||||
case C1StubId::throw_array_store_exception_id:
|
||||
{ StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
|
||||
// tos + 0: link
|
||||
// + 1: return address
|
||||
@ -987,7 +987,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case predicate_failed_trap_id:
|
||||
case C1StubId::predicate_failed_trap_id:
|
||||
{
|
||||
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
|
||||
|
||||
@ -1005,7 +1005,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case dtrace_object_alloc_id:
|
||||
case C1StubId::dtrace_object_alloc_id:
|
||||
{ // c_rarg0: object
|
||||
StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
|
||||
save_live_registers(sasm);
|
||||
|
||||
@ -64,31 +64,4 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
|
||||
__ emit_int32(0); // nmethod guard value
|
||||
}
|
||||
|
||||
int C2HandleAnonOMOwnerStub::max_size() const {
|
||||
// Max size of stub has been determined by testing with 0, in which case
|
||||
// C2CodeStubList::emit() will throw an assertion and report the actual size that
|
||||
// is needed.
|
||||
return 24;
|
||||
}
|
||||
|
||||
void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
|
||||
__ bind(entry());
|
||||
Register mon = monitor();
|
||||
Register t = tmp();
|
||||
assert(t != noreg, "need tmp register");
|
||||
|
||||
// Fix owner to be the current thread.
|
||||
__ str(rthread, Address(mon, ObjectMonitor::owner_offset()));
|
||||
|
||||
// Pop owner object from lock-stack.
|
||||
__ ldrw(t, Address(rthread, JavaThread::lock_stack_top_offset()));
|
||||
__ subw(t, t, oopSize);
|
||||
#ifdef ASSERT
|
||||
__ str(zr, Address(rthread, t));
|
||||
#endif
|
||||
__ strw(t, Address(rthread, JavaThread::lock_stack_top_offset()));
|
||||
|
||||
__ b(continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
@ -64,8 +64,8 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, oop);
|
||||
ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
|
||||
tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
|
||||
tst(tmp, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, cont);
|
||||
}
|
||||
|
||||
@ -150,10 +150,12 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
|
||||
Register oop = objectReg;
|
||||
Register box = boxReg;
|
||||
Register disp_hdr = tmpReg;
|
||||
Register owner_addr = tmpReg;
|
||||
Register tmp = tmp2Reg;
|
||||
Label cont;
|
||||
Label object_has_monitor;
|
||||
Label count, no_count;
|
||||
Label unlocked;
|
||||
|
||||
assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
|
||||
assert_different_registers(oop, box, tmp, disp_hdr);
|
||||
@ -204,14 +206,40 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
|
||||
b(cont);
|
||||
|
||||
bind(notRecursive);
|
||||
|
||||
// Compute owner address.
|
||||
lea(owner_addr, Address(tmp, ObjectMonitor::owner_offset()));
|
||||
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
stlr(zr, owner_addr);
|
||||
// We need a full fence after clearing owner to avoid stranding.
|
||||
// StoreLoad achieves this.
|
||||
membar(StoreLoad);
|
||||
|
||||
// Check if the entry lists are empty.
|
||||
ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
|
||||
ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
|
||||
orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
|
||||
cmp(rscratch1, zr); // Sets flags for result
|
||||
cbnz(rscratch1, cont);
|
||||
// need a release store here
|
||||
lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
|
||||
stlr(zr, tmp); // set unowned
|
||||
ldr(tmpReg, Address(tmp, ObjectMonitor::cxq_offset()));
|
||||
orr(rscratch1, rscratch1, tmpReg);
|
||||
cmp(rscratch1, zr);
|
||||
br(Assembler::EQ, cont); // If so we are done.
|
||||
|
||||
// Check if there is a successor.
|
||||
ldr(rscratch1, Address(tmp, ObjectMonitor::succ_offset()));
|
||||
cmp(rscratch1, zr);
|
||||
br(Assembler::NE, unlocked); // If so we are done.
|
||||
|
||||
// Save the monitor pointer in the current thread, so we can try to
|
||||
// reacquire the lock in SharedRuntime::monitor_exit_helper().
|
||||
str(tmp, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
|
||||
|
||||
cmp(zr, rthread); // Set Flag to NE => slow path
|
||||
b(cont);
|
||||
|
||||
bind(unlocked);
|
||||
cmp(zr, zr); // Set Flag to EQ => fast path
|
||||
|
||||
// Intentional fall-through
|
||||
|
||||
bind(cont);
|
||||
// flag == EQ indicates success
|
||||
@ -243,8 +271,8 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(t1, obj);
|
||||
ldrw(t1, Address(t1, Klass::access_flags_offset()));
|
||||
tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(t1, Address(t1, Klass::misc_flags_offset()));
|
||||
tst(t1, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, slow_path);
|
||||
}
|
||||
|
||||
@ -498,33 +526,41 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi
|
||||
|
||||
bind(not_recursive);
|
||||
|
||||
Label release;
|
||||
const Register t2_owner_addr = t2;
|
||||
|
||||
// Compute owner address.
|
||||
lea(t2_owner_addr, Address(t1_monitor, ObjectMonitor::owner_offset()));
|
||||
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
stlr(zr, t2_owner_addr);
|
||||
// We need a full fence after clearing owner to avoid stranding.
|
||||
// StoreLoad achieves this.
|
||||
membar(StoreLoad);
|
||||
|
||||
// Check if the entry lists are empty.
|
||||
ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
|
||||
ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
|
||||
orr(rscratch1, rscratch1, t3_t);
|
||||
cmp(rscratch1, zr);
|
||||
br(Assembler::EQ, release);
|
||||
br(Assembler::EQ, unlocked); // If so we are done.
|
||||
|
||||
// The owner may be anonymous and we removed the last obj entry in
|
||||
// the lock-stack. This loses the information about the owner.
|
||||
// Write the thread to the owner field so the runtime knows the owner.
|
||||
str(rthread, Address(t2_owner_addr));
|
||||
// Check if there is a successor.
|
||||
ldr(rscratch1, Address(t1_monitor, ObjectMonitor::succ_offset()));
|
||||
cmp(rscratch1, zr);
|
||||
br(Assembler::NE, unlocked); // If so we are done.
|
||||
|
||||
// Save the monitor pointer in the current thread, so we can try to
|
||||
// reacquire the lock in SharedRuntime::monitor_exit_helper().
|
||||
str(t1_monitor, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
|
||||
|
||||
cmp(zr, rthread); // Set Flag to NE => slow path
|
||||
b(slow_path);
|
||||
|
||||
bind(release);
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
stlr(zr, t2_owner_addr);
|
||||
}
|
||||
|
||||
bind(unlocked);
|
||||
decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
|
||||
cmp(zr, zr); // Set Flags to EQ => fast path
|
||||
|
||||
#ifdef ASSERT
|
||||
// Check that unlocked label is reached with Flags == EQ.
|
||||
|
||||
@ -45,7 +45,9 @@ define(`CAS_INSN',
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct compareAndExchange$1$6(iReg$2NoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
|
||||
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
|
||||
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
|
||||
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
|
||||
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
|
||||
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
|
||||
`dnl')
|
||||
match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
|
||||
@ -122,7 +124,9 @@ define(`CAS_INSN3',
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
|
||||
ifelse($1$6,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));),
|
||||
$1$6,NAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);),
|
||||
$1,P,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
|
||||
$1,N,INDENT(predicate(n->as_LoadStore()->barrier_data() == 0);),
|
||||
$6,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
|
||||
`dnl')
|
||||
match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
|
||||
|
||||
@ -129,5 +129,7 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
|
||||
address const end = addr + len;
|
||||
_base = (end <= (address)unscaled_max) ? nullptr : addr;
|
||||
|
||||
_range = end - _base;
|
||||
// Remember the Klass range:
|
||||
_klass_range_start = addr;
|
||||
_klass_range_end = addr + len;
|
||||
}
|
||||
|
||||
@ -38,7 +38,10 @@
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/g1/c1/g1BarrierSetC1.hpp"
|
||||
#endif
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#define __ masm->
|
||||
|
||||
@ -95,6 +98,54 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
__ pop(saved_regs, sp);
|
||||
}
|
||||
|
||||
static void generate_queue_test_and_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
|
||||
const Register thread, const Register value, const Register temp1, const Register temp2) {
|
||||
// Can we store a value in the given thread's buffer?
|
||||
// (The index field is typed as size_t.)
|
||||
__ ldr(temp1, Address(thread, in_bytes(index_offset))); // temp1 := *(index address)
|
||||
__ cbz(temp1, runtime); // jump to runtime if index == 0 (full buffer)
|
||||
// The buffer is not full, store value into it.
|
||||
__ sub(temp1, temp1, wordSize); // temp1 := next index
|
||||
__ str(temp1, Address(thread, in_bytes(index_offset))); // *(index address) := next index
|
||||
__ ldr(temp2, Address(thread, in_bytes(buffer_offset))); // temp2 := buffer address
|
||||
__ str(value, Address(temp2, temp1)); // *(buffer address + next index) := value
|
||||
}
|
||||
|
||||
static void generate_pre_barrier_fast_path(MacroAssembler* masm,
|
||||
const Register thread,
|
||||
const Register tmp1) {
|
||||
Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ ldrw(tmp1, in_progress);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ ldrb(tmp1, in_progress);
|
||||
}
|
||||
}
|
||||
|
||||
static void generate_pre_barrier_slow_path(MacroAssembler* masm,
|
||||
const Register obj,
|
||||
const Register pre_val,
|
||||
const Register thread,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
Label& done,
|
||||
Label& runtime) {
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
|
||||
}
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
generate_queue_test_and_insertion(masm,
|
||||
G1ThreadLocalData::satb_mark_queue_index_offset(),
|
||||
G1ThreadLocalData::satb_mark_queue_buffer_offset(),
|
||||
runtime,
|
||||
thread, pre_val, tmp1, tmp2);
|
||||
__ b(done);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
@ -115,43 +166,10 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
assert_different_registers(obj, pre_val, tmp1, tmp2);
|
||||
assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
|
||||
|
||||
Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
|
||||
Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
|
||||
Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ ldrw(tmp1, in_progress);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ ldrb(tmp1, in_progress);
|
||||
}
|
||||
generate_pre_barrier_fast_path(masm, thread, tmp1);
|
||||
// If marking is not active (*(mark queue active address) == 0), jump to done
|
||||
__ cbzw(tmp1, done);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
|
||||
__ ldr(tmp1, index); // tmp := *index_adr
|
||||
__ cbz(tmp1, runtime); // tmp == 0?
|
||||
// If yes, goto runtime
|
||||
|
||||
__ sub(tmp1, tmp1, wordSize); // tmp := tmp - wordSize
|
||||
__ str(tmp1, index); // *index_adr := tmp
|
||||
__ ldr(tmp2, buffer);
|
||||
__ add(tmp1, tmp1, tmp2); // tmp := tmp + *buffer_adr
|
||||
|
||||
// Record the previous value
|
||||
__ str(pre_val, Address(tmp1, 0));
|
||||
__ b(done);
|
||||
generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
@ -182,6 +200,50 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
|
||||
}
|
||||
|
||||
static void generate_post_barrier_fast_path(MacroAssembler* masm,
|
||||
const Register store_addr,
|
||||
const Register new_val,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
Label& done,
|
||||
bool new_val_may_be_null) {
|
||||
// Does store cross heap regions?
|
||||
__ eor(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
|
||||
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
|
||||
__ cbz(tmp1, done);
|
||||
// Crosses regions, storing null?
|
||||
if (new_val_may_be_null) {
|
||||
__ cbz(new_val, done);
|
||||
}
|
||||
// Storing region crossing non-null, is card young?
|
||||
__ lsr(tmp1, store_addr, CardTable::card_shift()); // tmp1 := card address relative to card table base
|
||||
__ load_byte_map_base(tmp2); // tmp2 := card table base address
|
||||
__ add(tmp1, tmp1, tmp2); // tmp1 := card address
|
||||
__ ldrb(tmp2, Address(tmp1)); // tmp2 := card
|
||||
__ cmpw(tmp2, (int)G1CardTable::g1_young_card_val()); // tmp2 := card == young_card_val?
|
||||
}
|
||||
|
||||
static void generate_post_barrier_slow_path(MacroAssembler* masm,
|
||||
const Register thread,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
Label& done,
|
||||
Label& runtime) {
|
||||
__ membar(Assembler::StoreLoad); // StoreLoad membar
|
||||
__ ldrb(tmp2, Address(tmp1)); // tmp2 := card
|
||||
__ cbzw(tmp2, done);
|
||||
// Storing a region crossing, non-null oop, card is clean.
|
||||
// Dirty card and log.
|
||||
STATIC_ASSERT(CardTable::dirty_card_val() == 0);
|
||||
__ strb(zr, Address(tmp1)); // *(card address) := dirty_card_val
|
||||
generate_queue_test_and_insertion(masm,
|
||||
G1ThreadLocalData::dirty_card_queue_index_offset(),
|
||||
G1ThreadLocalData::dirty_card_queue_buffer_offset(),
|
||||
runtime,
|
||||
thread, tmp1, tmp2, rscratch1);
|
||||
__ b(done);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
@ -194,70 +256,116 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
|
||||
&& tmp2 != noreg, "expecting a register");
|
||||
|
||||
Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
|
||||
Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
|
||||
|
||||
BarrierSet* bs = BarrierSet::barrier_set();
|
||||
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
|
||||
CardTable* ct = ctbs->card_table();
|
||||
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
// Does store cross heap regions?
|
||||
|
||||
__ eor(tmp1, store_addr, new_val);
|
||||
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
|
||||
__ cbz(tmp1, done);
|
||||
|
||||
// crosses regions, storing null?
|
||||
|
||||
__ cbz(new_val, done);
|
||||
|
||||
// storing region crossing non-null, is card already dirty?
|
||||
|
||||
const Register card_addr = tmp1;
|
||||
|
||||
__ lsr(card_addr, store_addr, CardTable::card_shift());
|
||||
|
||||
// get the address of the card
|
||||
__ load_byte_map_base(tmp2);
|
||||
__ add(card_addr, card_addr, tmp2);
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());
|
||||
generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
|
||||
// If card is young, jump to done
|
||||
__ br(Assembler::EQ, done);
|
||||
|
||||
assert((int)CardTable::dirty_card_val() == 0, "must be 0");
|
||||
|
||||
__ membar(Assembler::StoreLoad);
|
||||
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cbzw(tmp2, done);
|
||||
|
||||
// storing a region crossing, non-null oop, card is clean.
|
||||
// dirty card and log.
|
||||
|
||||
__ strb(zr, Address(card_addr));
|
||||
|
||||
__ ldr(rscratch1, queue_index);
|
||||
__ cbz(rscratch1, runtime);
|
||||
__ sub(rscratch1, rscratch1, wordSize);
|
||||
__ str(rscratch1, queue_index);
|
||||
|
||||
__ ldr(tmp2, buffer);
|
||||
__ str(card_addr, Address(tmp2, rscratch1));
|
||||
__ b(done);
|
||||
generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
// save the live input values
|
||||
RegSet saved = RegSet::of(store_addr);
|
||||
__ push(saved, sp);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
|
||||
__ pop(saved, sp);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
#if defined(COMPILER2)
|
||||
|
||||
static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
|
||||
SaveLiveRegisters save_registers(masm, stub);
|
||||
if (c_rarg0 != arg) {
|
||||
__ mov(c_rarg0, arg);
|
||||
}
|
||||
__ mov(c_rarg1, rthread);
|
||||
__ mov(rscratch1, runtime_path);
|
||||
__ blr(rscratch1);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* stub) {
|
||||
assert(thread == rthread, "must be");
|
||||
assert_different_registers(obj, pre_val, tmp1, tmp2);
|
||||
assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
|
||||
|
||||
stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);
|
||||
|
||||
generate_pre_barrier_fast_path(masm, thread, tmp1);
|
||||
// If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
|
||||
__ cbnzw(tmp1, *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Register obj = stub->obj();
|
||||
Register pre_val = stub->pre_val();
|
||||
Register thread = stub->thread();
|
||||
Register tmp1 = stub->tmp1();
|
||||
Register tmp2 = stub->tmp2();
|
||||
|
||||
__ bind(*stub->entry());
|
||||
generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, *stub->continuation(), runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PostBarrierStubC2* stub) {
|
||||
assert(thread == rthread, "must be");
|
||||
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
|
||||
rscratch1);
|
||||
assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
|
||||
&& tmp2 != noreg, "expecting a register");
|
||||
|
||||
stub->initialize_registers(thread, tmp1, tmp2);
|
||||
|
||||
bool new_val_may_be_null = (stub->barrier_data() & G1C2BarrierPostNotNull) == 0;
|
||||
generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, *stub->continuation(), new_val_may_be_null);
|
||||
// If card is not young, jump to stub (slow path)
|
||||
__ br(Assembler::NE, *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Register thread = stub->thread();
|
||||
Register tmp1 = stub->tmp1(); // tmp1 holds the card address.
|
||||
Register tmp2 = stub->tmp2();
|
||||
assert(stub->tmp3() == noreg, "not needed in this platform");
|
||||
|
||||
__ bind(*stub->entry());
|
||||
generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, *stub->continuation(), runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, tmp1, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry));
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp2) {
|
||||
bool on_oop = is_reference_type(type);
|
||||
|
||||
@ -33,6 +33,8 @@ class LIR_Assembler;
|
||||
class StubAssembler;
|
||||
class G1PreBarrierStub;
|
||||
class G1PostBarrierStub;
|
||||
class G1PreBarrierStubC2;
|
||||
class G1PostBarrierStubC2;
|
||||
|
||||
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
|
||||
protected:
|
||||
@ -69,6 +71,27 @@ public:
|
||||
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
|
||||
#endif
|
||||
|
||||
#ifdef COMPILER2
|
||||
void g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* c2_stub);
|
||||
void generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const;
|
||||
void g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PostBarrierStubC2* c2_stub);
|
||||
void generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const;
|
||||
#endif
|
||||
|
||||
void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp2);
|
||||
};
|
||||
|
||||
680
src/hotspot/cpu/aarch64/gc/g1/g1_aarch64.ad
Normal file
680
src/hotspot/cpu/aarch64/gc/g1/g1_aarch64.ad
Normal file
@ -0,0 +1,680 @@
|
||||
//
|
||||
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "gc/g1/g1BarrierSetAssembler_aarch64.hpp"
|
||||
#include "gc/g1/g1BarrierSetRuntime.hpp"
|
||||
|
||||
static void write_barrier_pre(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
RegSet preserve = RegSet(),
|
||||
RegSet no_preserve = RegSet()) {
|
||||
if (!G1PreBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
|
||||
for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->preserve(*reg);
|
||||
}
|
||||
for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->dont_preserve(*reg);
|
||||
}
|
||||
g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, rthread, tmp1, tmp2, stub);
|
||||
}
|
||||
|
||||
static void write_barrier_post(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2) {
|
||||
if (!G1PostBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
|
||||
g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, rthread, tmp1, tmp2, stub);
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
// BEGIN This section of the file is automatically generated. Do not edit --------------
|
||||
|
||||
// This section is generated from g1_aarch64.m4
|
||||
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1StoreP(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "str $src, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ str($src$$Register, $mem$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(istore_reg_mem);
|
||||
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1StorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "stlr $src, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ stlr($src$$Register, $mem$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1StoreN(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreN mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "strw $src, $mem\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ strw($src$$Register, $mem$$Register);
|
||||
if ((barrier_data() & G1C2BarrierPost) != 0) {
|
||||
if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
|
||||
__ decode_heap_oop($tmp1$$Register, $src$$Register);
|
||||
} else {
|
||||
__ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
|
||||
}
|
||||
}
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$tmp1$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(istore_reg_mem);
|
||||
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1StoreNVolatile(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreN mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
format %{ "stlrw $src, $mem\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ stlrw($src$$Register, $mem$$Register);
|
||||
if ((barrier_data() & G1C2BarrierPost) != 0) {
|
||||
if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
|
||||
__ decode_heap_oop($tmp1$$Register, $src$$Register);
|
||||
} else {
|
||||
__ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
|
||||
}
|
||||
}
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$tmp1$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Fused EncodeP + StoreN (plain, non-releasing): encodes the uncompressed
// oop $src into $tmp1 and stores the narrow form with strw, wrapped in the
// G1 pre-/post-barriers. Unlike the plain StoreN patterns, the uncompressed
// value is already in $src, so it is passed directly as the post-barrier's
// new_val — no decode is needed.
instruct g1EncodePAndStoreN(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "strw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      // Stored value is statically known non-null; skip the null check.
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ strw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Releasing (volatile) variant of g1EncodePAndStoreN: identical barrier and
// encode sequence, but the narrow store is a store-release (stlrw) and the
// instruction is only selected when the node needs a releasing store.
instruct g1EncodePAndStoreNVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "stlrw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      // Stored value is statically known non-null; skip the null check.
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ stlrw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// CompareAndExchangeP (returns the witnessed old value in $res) with G1
// barriers. The pre-barrier is given $oldval directly instead of reloading
// from $mem (see comment below); the post-barrier records $newval as the
// stored reference. Non-acquiring variant (release-only cmpxchg).
instruct g1CompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1CompareAndExchangeP: identical barrier sequence,
// but the cmpxchg is acquire+release and the pattern is selected when the
// node needs an acquiring load-exclusive.
instruct g1CompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Narrow-oop CompareAndExchangeN with G1 barriers. The previous value is
// not statically available, so the pre-barrier loads it itself from $mem
// (obj != noreg, pre_val in $tmp1). After the word-sized cmpxchg, $newval
// is decoded into $tmp1 to serve as the post-barrier's new_val.
instruct g1CompareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1CompareAndExchangeN: same barrier and decode
// sequence; the word-sized cmpxchg is acquire+release.
instruct g1CompareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Boolean CompareAndSwapP (strong and weak nodes share this pattern): like
// g1CompareAndExchangeP but the cmpxchg result register is unused (noreg)
// and $res is instead set from the flags via cset EQ.
instruct g1CompareAndSwapP(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1CompareAndSwapP: same barrier/cset sequence with
// an acquire+release cmpxchg.
instruct g1CompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Boolean narrow-oop CAS (strong and weak): pre-barrier loads the previous
// value from $mem into $tmp1; after the word cmpxchg, $res is set from the
// flags and $newval is decoded into $tmp1 for the post-barrier.
instruct g1CompareAndSwapN(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1CompareAndSwapN: same sequence with an
// acquire+release word cmpxchg.
instruct g1CompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// GetAndSetP (atomic pointer exchange) with G1 barriers. $preval doubles as
// the pre-barrier's temporary for the previous value before receiving the
// xchg result; the post-barrier records $newval as the stored reference.
instruct g1GetAndSetP(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchg($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1GetAndSetP: identical barriers, but uses the
// acquire-release exchange (atomic_xchgal).
instruct g1GetAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgal($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Narrow-oop GetAndSetN. Unlike the pointer variant, $preval (iRegNNoSp)
// cannot hold the uncompressed previous value, so the pre-barrier uses
// $tmp1 as pre_val; after the word exchange, $newval is decoded into $tmp1
// for the post-barrier.
instruct g1GetAndSetN(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Acquiring variant of g1GetAndSetN: identical barriers, using the
// acquire-release word exchange (atomic_xchgalw).
instruct g1GetAndSetNAcq(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgalw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Reference load (LoadP) with a G1 SATB-style pre-barrier only: the loaded
// value itself is passed as pre_val. No post-barrier is emitted — this is a
// load, nothing is stored.
instruct g1LoadP(iRegPNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  // This instruction does not need an acquiring counterpart because it is only
  // used for reference loading (Reference::get()). The same holds for g1LoadN.
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadP mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $dst$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(iload_reg_mem);
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
//
// Narrow-oop load (LoadN) with a G1 pre-barrier only. The decode into $tmp1
// and the pre-barrier are emitted only when barrier_data() requests a
// pre-barrier (G1C2BarrierPre); otherwise this is a bare ldrw.
instruct g1LoadN(iRegNNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadN mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed ptr" %}
  ins_encode %{
    __ ldrw($dst$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPre) != 0) {
      __ decode_heap_oop($tmp1$$Register, $dst$$Register);
      write_barrier_pre(masm, this,
                        noreg /* obj */,
                        $tmp1$$Register /* pre_val */,
                        $tmp2$$Register /* tmp1 */,
                        $tmp3$$Register /* tmp2 */);
    }
  %}
  ins_pipe(iload_reg_mem);
%}
|
||||
|
||||
// END This section of the file is automatically generated. Do not edit --------------
|
||||
384
src/hotspot/cpu/aarch64/gc/g1/g1_aarch64.m4
Normal file
384
src/hotspot/cpu/aarch64/gc/g1/g1_aarch64.m4
Normal file
@ -0,0 +1,384 @@
|
||||
dnl Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
dnl
|
||||
dnl This code is free software; you can redistribute it and/or modify it
|
||||
dnl under the terms of the GNU General Public License version 2 only, as
|
||||
dnl published by the Free Software Foundation.
|
||||
dnl
|
||||
dnl This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
dnl version 2 for more details (a copy is included in the LICENSE file that
|
||||
dnl accompanied this code).
|
||||
dnl
|
||||
dnl You should have received a copy of the GNU General Public License version
|
||||
dnl 2 along with this work; if not, write to the Free Software Foundation,
|
||||
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
dnl
|
||||
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
dnl or visit www.oracle.com if you need additional information or have any
|
||||
dnl questions.
|
||||
dnl
|
||||
// BEGIN This section of the file is automatically generated. Do not edit --------------
|
||||
|
||||
// This section is generated from g1_aarch64.m4
|
||||
|
||||
dnl STOREP_INSN(variant, store-insn) -- emits g1StoreP / g1StorePVolatile:
dnl an uncompressed-oop store wrapped in G1 pre-/post-write barriers.
dnl $1 selects the releasing (Volatile) variant, $2 is the store
dnl instruction to use (str or stlr).
dnl NOTE(review): the ifelse() quote characters below appear mangled by the
dnl diff rendering this text was extracted from; verify against the
dnl repository copy before editing.
define(`STOREP_INSN',
`
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreP$1(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && ifelse($1,Volatile,'needs_releasing_store(n)`,'!needs_releasing_store(n)`) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(ifelse($1,Volatile,VOLATILE_REF_COST,INSN_COST));
  format %{ "$2 $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ $2($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(ifelse($1,Volatile,pipe_class_memory,istore_reg_mem));
%}')dnl
STOREP_INSN(,str)
STOREP_INSN(Volatile,stlr)
|
||||
dnl
|
||||
dnl STOREN_INSN(variant, store-insn) -- emits g1StoreN / g1StoreNVolatile:
dnl a compressed-oop store with G1 barriers. $1 selects the releasing
dnl (Volatile) variant, $2 is the narrow store instruction (strw or stlrw).
dnl The stored narrow value is decoded into tmp1 for the post-barrier only
dnl when G1C2BarrierPost is set.
define(`STOREN_INSN',
`
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreN$1(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && ifelse($1,Volatile,'needs_releasing_store(n)`,'!needs_releasing_store(n)`) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(ifelse($1,Volatile,VOLATILE_REF_COST,INSN_COST));
  format %{ "$2 $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ $2($src$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(ifelse($1,Volatile,pipe_class_memory,istore_reg_mem));
%}')dnl
STOREN_INSN(,strw)
STOREN_INSN(Volatile,stlrw)
|
||||
dnl
|
||||
dnl ENCODESTOREN_INSN(variant, store-insn) -- emits g1EncodePAndStoreN /
dnl g1EncodePAndStoreNVolatile: fused encode + narrow store with G1
dnl barriers. $1 selects the releasing (Volatile) variant, $2 is the narrow
dnl store instruction (strw or stlrw). The uncompressed value is already in
dnl src, so it feeds the post-barrier directly (no decode).
define(`ENCODESTOREN_INSN',
`
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1EncodePAndStoreN$1(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && ifelse($1,Volatile,'needs_releasing_store(n)`,'!needs_releasing_store(n)`) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(ifelse($1,Volatile,VOLATILE_REF_COST,INSN_COST));
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "$2 $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ $2($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(ifelse($1,Volatile,pipe_class_memory,istore_reg_mem));
%}')dnl
ENCODESTOREN_INSN(,strw)
ENCODESTOREN_INSN(Volatile,stlrw)
|
||||
dnl
|
||||
dnl CAEP_INSN(variant, format-suffix, acquire-flag) -- emits
dnl g1CompareAndExchangeP / g1CompareAndExchangePAcq. $1 selects the
dnl acquiring (Acq) variant, $2 is the format-string suffix (_acq), and $3
dnl is the boolean passed as the cmpxchg acquire argument.
define(`CAEP_INSN',
`
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeP$1(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
  format %{ "cmpxchg$2 $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               $3 /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}')dnl
CAEP_INSN(,,false)
CAEP_INSN(Acq,_acq,true)
|
||||
dnl
|
||||
dnl CAEN_INSN(variant, format-suffix, acquire-flag) -- emits
dnl g1CompareAndExchangeN / g1CompareAndExchangeNAcq, the narrow-oop
dnl compare-and-exchange. Arguments as for CAEP_INSN. The pre-barrier loads
dnl the previous value from mem itself (obj != noreg); newval is decoded
dnl into tmp1 after the cmpxchg for the post-barrier.
define(`CAEN_INSN',
`
// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeN$1(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
  format %{ "cmpxchg$2 $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               $3 /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}')dnl
CAEN_INSN(,,false)
CAEN_INSN(Acq,_acq,true)
|
||||
dnl
|
||||
define(`CASP_INSN',
|
||||
`
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1CompareAndSwapP$1(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
|
||||
ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
|
||||
format %{ "cmpxchg$2 $mem, $oldval, $newval\t# (ptr)\n\t"
|
||||
"cset $res, EQ" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($oldval$$Register, $mem$$Register);
|
||||
assert_different_registers($newval$$Register, $mem$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
noreg /* obj */,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
$3 /* acquire */, true /* release */, false /* weak */, noreg);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$newval$$Register /* new_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}')dnl
|
||||
CASP_INSN(,,false)
|
||||
CASP_INSN(Acq,_acq,true)
|
||||
dnl
|
||||
define(`CASN_INSN',
|
||||
`
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1CompareAndSwapN$1(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
|
||||
effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
|
||||
format %{ "cmpxchg$2 $mem, $oldval, $newval\t# (narrow oop)\n\t"
|
||||
"cset $res, EQ" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($oldval$$Register, $mem$$Register);
|
||||
assert_different_registers($newval$$Register, $mem$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
|
||||
$3 /* acquire */, true /* release */, false /* weak */, noreg);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
__ decode_heap_oop($tmp1$$Register, $newval$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$tmp1$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}')dnl
|
||||
CASN_INSN(,,false)
|
||||
CASN_INSN(Acq,_acq,true)
|
||||
dnl
|
||||
define(`XCHGP_INSN',
|
||||
`
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1GetAndSetP$1(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set preval (GetAndSetP mem newval));
|
||||
effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
|
||||
ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
|
||||
format %{ "atomic_xchg$2 $preval, $newval, [$mem]" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($mem$$Register, $newval$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$preval$$Register /* pre_val (as a temporary register) */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
|
||||
__ $3($preval$$Register, $newval$$Register, $mem$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$newval$$Register /* new_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}')dnl
|
||||
XCHGP_INSN(,,atomic_xchg)
|
||||
XCHGP_INSN(Acq,_acq,atomic_xchgal)
|
||||
dnl
|
||||
define(`XCHGN_INSN',
|
||||
`
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1GetAndSetN$1(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && ifelse($1,Acq,'needs_acquiring_load_exclusive(n)`,'!needs_acquiring_load_exclusive(n)`) && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set preval (GetAndSetN mem newval));
|
||||
effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(ifelse($1,Acq,VOLATILE_REF_COST,2 * VOLATILE_REF_COST));
|
||||
format %{ "$2 $preval, $newval, [$mem]" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($mem$$Register, $newval$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
|
||||
__ $3($preval$$Register, $newval$$Register, $mem$$Register);
|
||||
__ decode_heap_oop($tmp1$$Register, $newval$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$tmp1$$Register /* new_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}')dnl
|
||||
XCHGN_INSN(,atomic_xchgw,atomic_xchgw)
|
||||
XCHGN_INSN(Acq,atomic_xchgw_acq,atomic_xchgalw)
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1LoadP(iRegPNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
|
||||
%{
|
||||
// This instruction does not need an acquiring counterpart because it is only
|
||||
// used for reference loading (Reference::get()). The same holds for g1LoadN.
|
||||
predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL cr);
|
||||
ins_cost(4 * INSN_COST);
|
||||
format %{ "ldr $dst, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
__ ldr($dst$$Register, $mem$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
noreg /* obj */,
|
||||
$dst$$Register /* pre_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(iload_reg_mem);
|
||||
%}
|
||||
|
||||
// This pattern is generated automatically from g1_aarch64.m4.
|
||||
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
|
||||
instruct g1LoadN(iRegNNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadN mem));
|
||||
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
ins_cost(4 * INSN_COST);
|
||||
format %{ "ldrw $dst, $mem\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
__ ldrw($dst$$Register, $mem$$Register);
|
||||
if ((barrier_data() & G1C2BarrierPre) != 0) {
|
||||
__ decode_heap_oop($tmp1$$Register, $dst$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
noreg /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */);
|
||||
}
|
||||
%}
|
||||
ins_pipe(iload_reg_mem);
|
||||
%}
|
||||
|
||||
// END This section of the file is automatically generated. Do not edit --------------
|
||||
@ -67,9 +67,9 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec
|
||||
|
||||
__ push(saved_regs, sp);
|
||||
if (UseCompressedOops) {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop), src, dst, count);
|
||||
} else {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop), src, dst, count);
|
||||
}
|
||||
__ pop(saved_regs, sp);
|
||||
__ bind(done);
|
||||
@ -164,9 +164,9 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
|
||||
|
||||
if (expand_call) {
|
||||
assert(pre_val != c_rarg1, "smashed arg");
|
||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
|
||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
} else {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
}
|
||||
|
||||
__ pop(saved, sp);
|
||||
@ -698,7 +698,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ bind(runtime);
|
||||
__ push_call_clobbered_registers();
|
||||
__ load_parameter(0, pre_val);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
__ pop_call_clobbered_registers();
|
||||
__ bind(done);
|
||||
|
||||
|
||||
@ -51,7 +51,7 @@ static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node,
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct xLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
|
||||
instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0));
|
||||
|
||||
@ -93,7 +93,7 @@ static size_t probe_valid_max_address_bit() {
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
|
||||
@ -100,7 +100,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
|
||||
instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
|
||||
|
||||
@ -690,8 +690,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, obj_reg);
|
||||
ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
|
||||
tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
|
||||
tst(tmp, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
|
||||
@ -1838,7 +1838,8 @@ void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_f
|
||||
L_slow_path = &L_fallthrough;
|
||||
}
|
||||
// Fast path check: class is fully initialized
|
||||
ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
|
||||
lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
|
||||
ldarb(scratch, scratch);
|
||||
subs(zr, scratch, InstanceKlass::fully_initialized);
|
||||
br(Assembler::EQ, *L_fast_path);
|
||||
|
||||
@ -2967,7 +2968,7 @@ void MacroAssembler::verify_heapbase(const char* msg) {
|
||||
if (CheckCompressedOops) {
|
||||
Label ok;
|
||||
push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
|
||||
cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
|
||||
cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
|
||||
br(Assembler::EQ, ok);
|
||||
stop(msg);
|
||||
bind(ok);
|
||||
@ -3133,9 +3134,9 @@ void MacroAssembler::reinit_heapbase()
|
||||
{
|
||||
if (UseCompressedOops) {
|
||||
if (Universe::is_fully_initialized()) {
|
||||
mov(rheapbase, CompressedOops::ptrs_base());
|
||||
mov(rheapbase, CompressedOops::base());
|
||||
} else {
|
||||
lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
|
||||
lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
|
||||
ldr(rheapbase, Address(rheapbase));
|
||||
}
|
||||
}
|
||||
@ -5010,8 +5011,10 @@ void MacroAssembler::decode_heap_oop(Register d, Register s) {
|
||||
verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
|
||||
#endif
|
||||
if (CompressedOops::base() == nullptr) {
|
||||
if (CompressedOops::shift() != 0 || d != s) {
|
||||
if (CompressedOops::shift() != 0) {
|
||||
lsl(d, s, CompressedOops::shift());
|
||||
} else if (d != s) {
|
||||
mov(d, s);
|
||||
}
|
||||
} else {
|
||||
Label done;
|
||||
@ -5082,8 +5085,8 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
|
||||
|
||||
if (operand_valid_for_logical_immediate(
|
||||
/*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
|
||||
const uint64_t range_mask =
|
||||
(1ULL << log2i(CompressedKlassPointers::range())) - 1;
|
||||
const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base();
|
||||
const uint64_t range_mask = (1ULL << log2i(range)) - 1;
|
||||
if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
|
||||
return (_klass_decode_mode = KlassDecodeXor);
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -27,6 +27,7 @@
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "classfile/javaClasses.inline.hpp"
|
||||
#include "classfile/vmClasses.hpp"
|
||||
#include "compiler/disassembler.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
@ -36,7 +37,7 @@
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
#define __ _masm->
|
||||
#define __ Disassembler::hook<MacroAssembler>(__FILE__, __LINE__, _masm)->
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
|
||||
@ -49,6 +49,7 @@
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/timerTrace.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
@ -2181,7 +2182,8 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
pad += 512; // Increase the buffer size when compiling for JVMCI
|
||||
}
|
||||
#endif
|
||||
CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
|
||||
CodeBuffer buffer(name, 2048+pad, 1024);
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
int frame_size_in_words;
|
||||
OopMap* map = nullptr;
|
||||
@ -2232,7 +2234,7 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
|
||||
int reexecute_offset = __ pc() - start;
|
||||
#if INCLUDE_JVMCI && !defined(COMPILER1)
|
||||
if (EnableJVMCI && UseJVMCICompiler) {
|
||||
if (UseJVMCICompiler) {
|
||||
// JVMCI does not use this kind of deoptimization
|
||||
__ should_not_reach_here();
|
||||
}
|
||||
@ -2565,20 +2567,23 @@ uint SharedRuntime::out_preserve_stack_slots() {
|
||||
// Generate a special Compile2Runtime blob that saves all registers,
|
||||
// and setup oopmap.
|
||||
//
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
|
||||
assert(is_polling_page_id(id), "expected a polling page stub id");
|
||||
|
||||
ResourceMark rm;
|
||||
OopMapSet *oop_maps = new OopMapSet();
|
||||
OopMap* map;
|
||||
|
||||
// Allocate space for the code. Setup code generation tools.
|
||||
CodeBuffer buffer("handler_blob", 2048, 1024);
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 2048, 1024);
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
|
||||
address start = __ pc();
|
||||
address call_pc = nullptr;
|
||||
int frame_size_in_words;
|
||||
bool cause_return = (poll_type == POLL_AT_RETURN);
|
||||
RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
|
||||
bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
|
||||
RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
|
||||
|
||||
// When the signal occurred, the LR was either signed and stored on the stack (in which
|
||||
// case it will be restored from the stack before being used) or unsigned and not stored
|
||||
@ -2690,12 +2695,14 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
|
||||
// but since this is generic code we don't know what they are and the caller
|
||||
// must do any gc of the args.
|
||||
//
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
|
||||
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
|
||||
assert(is_resolve_id(id), "expected a resolve stub id");
|
||||
|
||||
// allocate space for the code
|
||||
ResourceMark rm;
|
||||
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 1000, 512);
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
|
||||
@ -2787,7 +2794,11 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
|
||||
// otherwise assume that stack unwinding will be initiated, so
|
||||
// caller saved registers were assumed volatile in the compiler.
|
||||
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
|
||||
assert(is_throw_id(id), "expected a throw stub id");
|
||||
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
|
||||
// Information about frame layout at time of blocking runtime call.
|
||||
// Note that we only have to preserve callee-saved registers since
|
||||
// the compilers are responsible for supplying a continuation point
|
||||
@ -2896,7 +2907,8 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
|
||||
int insts_size = 1024;
|
||||
int locs_size = 64;
|
||||
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
|
||||
CodeBuffer code(name, insts_size, locs_size);
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
@ -2915,7 +2927,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
oop_maps->add_gc_map(the_pc - start, map);
|
||||
|
||||
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
|
||||
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
|
||||
RuntimeStub::new_runtime_stub(name, &code, frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
oop_maps, false);
|
||||
return stub;
|
||||
@ -2934,7 +2946,8 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
int insts_size = 1024;
|
||||
int locs_size = 64;
|
||||
|
||||
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
|
||||
CodeBuffer code(name, insts_size, locs_size);
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
@ -2953,7 +2966,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
oop_maps->add_gc_map(the_pc - start, map);
|
||||
|
||||
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
|
||||
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
|
||||
RuntimeStub::new_runtime_stub(name, &code, frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
oop_maps, false);
|
||||
return stub;
|
||||
|
||||
@ -3417,15 +3417,15 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register rscratch3 = r10;
|
||||
Register rscratch4 = r11;
|
||||
|
||||
__ andw(rscratch3, r2, r4);
|
||||
__ bicw(rscratch4, r3, r4);
|
||||
reg_cache.extract_u32(rscratch1, k);
|
||||
__ movw(rscratch2, t);
|
||||
__ orrw(rscratch3, rscratch3, rscratch4);
|
||||
__ addw(rscratch4, r1, rscratch2);
|
||||
__ addw(rscratch4, rscratch4, rscratch1);
|
||||
__ addw(rscratch3, rscratch3, rscratch4);
|
||||
__ rorw(rscratch2, rscratch3, 32 - s);
|
||||
__ bicw(rscratch2, r3, r4);
|
||||
__ andw(rscratch3, r2, r4);
|
||||
__ addw(rscratch2, rscratch2, rscratch4);
|
||||
__ addw(rscratch2, rscratch2, rscratch3);
|
||||
__ rorw(rscratch2, rscratch2, 32 - s);
|
||||
__ addw(r1, rscratch2, r2);
|
||||
}
|
||||
|
||||
@ -7320,6 +7320,28 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
// load Method* target of MethodHandle
|
||||
// j_rarg0 = jobject receiver
|
||||
// rmethod = result
|
||||
address generate_upcall_stub_load_target() {
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target");
|
||||
address start = __ pc();
|
||||
|
||||
__ resolve_global_jobject(j_rarg0, rscratch1, rscratch2);
|
||||
// Load target method from receiver
|
||||
__ load_heap_oop(rmethod, Address(j_rarg0, java_lang_invoke_MethodHandle::form_offset()), rscratch1, rscratch2);
|
||||
__ load_heap_oop(rmethod, Address(rmethod, java_lang_invoke_LambdaForm::vmentry_offset()), rscratch1, rscratch2);
|
||||
__ load_heap_oop(rmethod, Address(rmethod, java_lang_invoke_MemberName::method_offset()), rscratch1, rscratch2);
|
||||
__ access_load_at(T_ADDRESS, IN_HEAP, rmethod,
|
||||
Address(rmethod, java_lang_invoke_ResolvedMethodName::vmtarget_offset()),
|
||||
noreg, noreg);
|
||||
__ str(rmethod, Address(rthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
|
||||
|
||||
__ ret(lr);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
@ -8241,6 +8263,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
#endif
|
||||
|
||||
StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
|
||||
StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
|
||||
|
||||
StubRoutines::aarch64::set_completed(); // Inidicate that arraycopy and zero_blocks stubs are generated
|
||||
}
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "compiler/disassembler.hpp"
|
||||
#include "compiler/compiler_globals.hpp"
|
||||
#include "gc/shared/barrierSetAssembler.hpp"
|
||||
#include "interpreter/bytecodeHistogram.hpp"
|
||||
@ -67,13 +68,7 @@
|
||||
// Max size with JVMTI
|
||||
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
extern "C" void entry(CodeBuffer*);
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
|
||||
|
||||
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
address entry = __ pc();
|
||||
@ -2004,13 +1999,21 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
|
||||
address& vep) {
|
||||
assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
|
||||
Label L;
|
||||
aep = __ pc(); __ push_ptr(); __ b(L);
|
||||
fep = __ pc(); __ push_f(); __ b(L);
|
||||
dep = __ pc(); __ push_d(); __ b(L);
|
||||
lep = __ pc(); __ push_l(); __ b(L);
|
||||
bep = cep = sep =
|
||||
iep = __ pc(); __ push_i();
|
||||
vep = __ pc();
|
||||
aep = __ pc(); // atos entry point
|
||||
__ push_ptr();
|
||||
__ b(L);
|
||||
fep = __ pc(); // ftos entry point
|
||||
__ push_f();
|
||||
__ b(L);
|
||||
dep = __ pc(); // dtos entry point
|
||||
__ push_d();
|
||||
__ b(L);
|
||||
lep = __ pc(); // ltos entry point
|
||||
__ push_l();
|
||||
__ b(L);
|
||||
bep = cep = sep = iep = __ pc(); // [bcsi]tos entry point
|
||||
__ push_i();
|
||||
vep = __ pc(); // vtos entry point
|
||||
__ bind(L);
|
||||
generate_and_dispatch(t);
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -25,6 +25,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "compiler/disassembler.hpp"
|
||||
#include "compiler/compilerDefinitions.inline.hpp"
|
||||
#include "gc/shared/barrierSetAssembler.hpp"
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
@ -49,7 +50,7 @@
|
||||
#include "runtime/synchronizer.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
#define __ _masm->
|
||||
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
|
||||
|
||||
// Address computation: local variables
|
||||
|
||||
@ -2191,9 +2192,9 @@ void TemplateTable::_return(TosState state)
|
||||
|
||||
__ ldr(c_rarg1, aaddress(0));
|
||||
__ load_klass(r3, c_rarg1);
|
||||
__ ldrw(r3, Address(r3, Klass::access_flags_offset()));
|
||||
__ ldrb(r3, Address(r3, Klass::misc_flags_offset()));
|
||||
Label skip_register_finalizer;
|
||||
__ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
|
||||
__ tbz(r3, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
|
||||
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/upcallLinker.hpp"
|
||||
@ -117,7 +118,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr
|
||||
static const int upcall_stub_code_base_size = 1024;
|
||||
static const int upcall_stub_size_per_arg = 16;
|
||||
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Symbol* signature,
|
||||
BasicType* out_sig_bt, int total_out_args,
|
||||
BasicType ret_type,
|
||||
jobject jabi, jobject jconv,
|
||||
@ -222,7 +223,6 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
|
||||
__ block_comment("{ on_entry");
|
||||
__ lea(c_rarg0, Address(sp, frame_data_offset));
|
||||
__ movptr(c_rarg1, (intptr_t)receiver);
|
||||
__ movptr(rscratch1, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_entry));
|
||||
__ blr(rscratch1);
|
||||
__ mov(rthread, r0);
|
||||
@ -238,12 +238,10 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
arg_shuffle.generate(_masm, as_VMStorage(shuffle_reg), abi._shadow_space_bytes, 0);
|
||||
__ block_comment("} argument shuffle");
|
||||
|
||||
__ block_comment("{ receiver ");
|
||||
__ get_vm_result(j_rarg0, rthread);
|
||||
__ block_comment("} receiver ");
|
||||
|
||||
__ mov_metadata(rmethod, entry);
|
||||
__ str(rmethod, Address(rthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
|
||||
__ block_comment("{ load target ");
|
||||
__ movptr(j_rarg0, (intptr_t)receiver);
|
||||
__ far_call(RuntimeAddress(StubRoutines::upcall_stub_load_target()), rscratch1); // puts target Method* in rmethod
|
||||
__ block_comment("} load target ");
|
||||
|
||||
__ push_cont_fastpath(rthread);
|
||||
|
||||
@ -318,7 +316,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
|
||||
#ifndef PRODUCT
|
||||
stringStream ss;
|
||||
ss.print("upcall_stub_%s", entry->signature()->as_C_string());
|
||||
ss.print("upcall_stub_%s", signature->as_C_string());
|
||||
const char* name = _masm->code_string(ss.as_string());
|
||||
#else // PRODUCT
|
||||
const char* name = "upcall_stub";
|
||||
|
||||
@ -1003,10 +1003,6 @@ const RegMask* Matcher::predicate_reg_mask(void) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Vector calling convention not yet implemented.
|
||||
bool Matcher::supports_vector_calling_convention(void) {
|
||||
return false;
|
||||
@ -3890,6 +3886,7 @@ instruct loadRange(iRegI dst, memoryI mem) %{
|
||||
|
||||
|
||||
instruct loadP(iRegP dst, memoryP mem) %{
|
||||
predicate(!(UseG1GC && n->as_Load()->barrier_data() != 0));
|
||||
match(Set dst (LoadP mem));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
@ -4225,18 +4222,6 @@ instruct storeB(memoryB mem, store_RegI src) %{
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
instruct storeCM(memoryB mem, store_RegI src) %{
|
||||
match(Set mem (StoreCM mem src));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
format %{ "STRB $src,$mem\t! CMS card-mark byte" %}
|
||||
ins_encode %{
|
||||
__ strb($src$$Register, $mem$$Address);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
// Store Char/Short
|
||||
|
||||
|
||||
@ -4356,6 +4341,7 @@ instruct movSP(store_ptr_RegP dst, SPRegP src) %{
|
||||
|
||||
|
||||
instruct storeP(memoryP mem, store_ptr_RegP src) %{
|
||||
predicate(!(UseG1GC && n->as_Store()->barrier_data() != 0));
|
||||
match(Set mem (StoreP mem src));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
@ -5390,6 +5376,7 @@ instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI re
|
||||
%}
|
||||
|
||||
instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
|
||||
predicate(!(UseG1GC && n->as_LoadStore()->barrier_data() != 0));
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect( KILL ccr, TEMP tmp);
|
||||
size(28);
|
||||
@ -5659,6 +5646,7 @@ instruct xchgL(memoryex mem, iRegLd newval, iRegLd res, iRegI tmp, flagsReg ccr)
|
||||
%}
|
||||
|
||||
instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp, flagsReg ccr) %{
|
||||
predicate(!(UseG1GC && n->as_LoadStore()->barrier_data() != 0));
|
||||
match(Set res (GetAndSetP mem newval));
|
||||
effect(KILL ccr, TEMP tmp, TEMP res);
|
||||
size(16);
|
||||
|
||||
@ -119,8 +119,9 @@ class RegisterSet {
|
||||
}
|
||||
|
||||
friend RegisterSet operator | (const RegisterSet set1, const RegisterSet set2) {
|
||||
assert((set1._encoding & set2._encoding) == 0,
|
||||
"encoding constraint");
|
||||
// why so strong constraint?
|
||||
// assert((set1._encoding & set2._encoding) == 0,
|
||||
// "encoding constraint");
|
||||
return RegisterSet(set1._encoding | set2._encoding);
|
||||
}
|
||||
|
||||
@ -142,6 +143,11 @@ class RegisterSet {
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static RegisterSet from(RegSet set) {
|
||||
assert(set.size(), "RegSet must not be empty");
|
||||
return RegisterSet(set.bits());
|
||||
}
|
||||
};
|
||||
|
||||
#if R9_IS_SCRATCHED
|
||||
@ -157,6 +163,10 @@ class FloatRegisterSet {
|
||||
|
||||
public:
|
||||
|
||||
FloatRegisterSet() {
|
||||
_encoding = 0;
|
||||
}
|
||||
|
||||
FloatRegisterSet(FloatRegister reg) {
|
||||
if (reg->hi_bit() == 0) {
|
||||
_encoding = reg->hi_bits() << 12 | reg->lo_bit() << 22 | 1;
|
||||
@ -185,6 +195,15 @@ class FloatRegisterSet {
|
||||
return (_encoding & 0xFFFFFF00) | ((_encoding & 0xFF) << 1);
|
||||
}
|
||||
|
||||
static FloatRegisterSet from(FloatRegSet set) {
|
||||
assert(set.size(), "FloatRegSet must not be empty");
|
||||
// the vector load/store instructions operate on a set of consecutive registers.
|
||||
// for the sake of simplicity, write all registers between the first and last in the set
|
||||
size_t range = (*set.rbegin())->encoding() - (*set.begin())->encoding() + 1;
|
||||
// push_float stores float regisgters by pairs
|
||||
return FloatRegisterSet(*set.begin(), (range+1)/2);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -46,7 +46,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
ce->store_parameter(_bci, 0);
|
||||
ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
|
||||
__ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::counter_overflow_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
|
||||
@ -57,7 +57,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
__ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
debug_only(__ should_not_reach_here());
|
||||
@ -73,10 +73,10 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
}
|
||||
|
||||
if (_throw_index_out_of_bounds_exception) {
|
||||
__ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::throw_index_exception_id), relocInfo::runtime_call_type);
|
||||
} else {
|
||||
__ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction?
|
||||
__ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::throw_range_check_failed_id), relocInfo::runtime_call_type);
|
||||
}
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
@ -89,7 +89,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
|
||||
|
||||
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
__ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
debug_only(__ should_not_reach_here());
|
||||
@ -100,7 +100,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
}
|
||||
__ bind(_entry);
|
||||
__ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
|
||||
__ call(Runtime1::entry_for(C1StubId::throw_div0_exception_id),
|
||||
relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
DEBUG_ONLY(STOP("DivByZero");)
|
||||
@ -109,14 +109,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
// Implementation of NewInstanceStub
|
||||
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
|
||||
_result = result;
|
||||
_klass = klass;
|
||||
_klass_reg = klass_reg;
|
||||
_info = new CodeEmitInfo(info);
|
||||
assert(stub_id == Runtime1::new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_init_check_id,
|
||||
assert(stub_id == C1StubId::new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_init_check_id,
|
||||
"need new_instance id");
|
||||
_stub_id = stub_id;
|
||||
}
|
||||
@ -148,7 +148,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
assert(_klass_reg->as_register() == R1, "runtime call setup");
|
||||
assert(_length->as_register() == R2, "runtime call setup");
|
||||
__ bind(_entry);
|
||||
__ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::new_type_array_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
__ b(_continuation);
|
||||
@ -170,7 +170,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
assert(_klass_reg->as_register() == R1, "runtime call setup");
|
||||
assert(_length->as_register() == R2, "runtime call setup");
|
||||
__ bind(_entry);
|
||||
__ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::new_object_array_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
__ b(_continuation);
|
||||
@ -189,9 +189,9 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
|
||||
__ str(lock_reg, Address(SP, BytesPerWord));
|
||||
}
|
||||
|
||||
Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
|
||||
Runtime1::monitorenter_id :
|
||||
Runtime1::monitorenter_nofpu_id;
|
||||
C1StubId enter_id = ce->compilation()->has_fpu_code() ?
|
||||
C1StubId::monitorenter_id :
|
||||
C1StubId::monitorenter_nofpu_id;
|
||||
__ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
@ -210,9 +210,9 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
|
||||
__ str(lock_reg, Address(SP));
|
||||
|
||||
// Non-blocking leaf routine - no call info needed
|
||||
Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
|
||||
Runtime1::monitorexit_id :
|
||||
Runtime1::monitorexit_nofpu_id;
|
||||
C1StubId exit_id = ce->compilation()->has_fpu_code() ?
|
||||
C1StubId::monitorexit_id :
|
||||
C1StubId::monitorexit_nofpu_id;
|
||||
__ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
|
||||
__ b(_continuation);
|
||||
}
|
||||
@ -322,10 +322,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
address target = nullptr;
|
||||
relocInfo::relocType reloc_type = relocInfo::none;
|
||||
switch (_id) {
|
||||
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
|
||||
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
|
||||
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
|
||||
case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
|
||||
case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
|
||||
case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
|
||||
case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
|
||||
case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
__ bind(call_patch);
|
||||
@ -351,7 +351,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
|
||||
__ mov_slow(Rtemp, _trap_request);
|
||||
ce->verify_reserved_argument_area_size(1);
|
||||
__ str(Rtemp, Address(SP));
|
||||
__ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::deoptimize_id), relocInfo::runtime_call_type);
|
||||
ce->add_call_info_here(_info);
|
||||
DEBUG_ONLY(__ should_not_reach_here());
|
||||
}
|
||||
@ -362,9 +362,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
// Deoptimize, do not throw the exception, because it is
|
||||
// probably wrong to do it here.
|
||||
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
} else {
|
||||
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
|
||||
a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
}
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
__ bind(_entry);
|
||||
|
||||
@ -213,7 +213,7 @@ int LIR_Assembler::emit_exception_handler() {
|
||||
// check that there is really an exception
|
||||
__ verify_not_null_oop(Rexception_obj);
|
||||
|
||||
__ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type);
|
||||
__ should_not_reach_here();
|
||||
|
||||
assert(code_offset() - offset <= exception_handler_size(), "overflow");
|
||||
@ -253,7 +253,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
||||
|
||||
// remove the activation and dispatch to the unwind handler
|
||||
__ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
|
||||
__ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
|
||||
__ jump(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
|
||||
|
||||
// Emit the slow path assembly
|
||||
if (stub != nullptr) {
|
||||
@ -948,6 +948,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
if (op->init_check()) {
|
||||
Register tmp = op->tmp1()->as_register();
|
||||
__ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
|
||||
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ cmp(tmp, InstanceKlass::fully_initialized);
|
||||
__ b(*op->stub()->entry(), ne);
|
||||
@ -1136,7 +1137,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ b(*failure_target, ne);
|
||||
// slow case
|
||||
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ cbz(R0, *failure_target);
|
||||
if (op->should_profile()) {
|
||||
Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
|
||||
@ -1210,7 +1211,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ cmp(Rtemp, k_RInfo, ne);
|
||||
__ b(*success_target, eq);
|
||||
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ cbz(R0, *failure_target);
|
||||
}
|
||||
} else {
|
||||
@ -1227,7 +1228,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ b(*failure_target, ne);
|
||||
// slow case
|
||||
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ cbz(R0, *failure_target);
|
||||
}
|
||||
|
||||
@ -1303,7 +1304,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
}
|
||||
__ b(*success_target, eq);
|
||||
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
if (!op->should_profile()) {
|
||||
move_regs(R0, res);
|
||||
} else {
|
||||
@ -1334,7 +1335,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ b(*failure_target, ne);
|
||||
// slow case
|
||||
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
|
||||
if (!op->should_profile()) {
|
||||
move_regs(R0, res);
|
||||
}
|
||||
@ -1981,9 +1982,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
|
||||
assert(exceptionPC->as_register() == Rexception_pc, "must match");
|
||||
info->add_register_oop(exceptionOop);
|
||||
|
||||
Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
|
||||
Runtime1::handle_exception_id :
|
||||
Runtime1::handle_exception_nofpu_id;
|
||||
C1StubId handle_id = compilation()->has_fpu_code() ?
|
||||
C1StubId::handle_exception_id :
|
||||
C1StubId::handle_exception_nofpu_id;
|
||||
Label return_address;
|
||||
__ adr(Rexception_pc, return_address);
|
||||
__ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
|
||||
@ -2260,7 +2261,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ mov(altFP_7_11, R1);
|
||||
__ mov(R0, tmp);
|
||||
__ mov(R1, tmp2);
|
||||
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
|
||||
__ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
|
||||
__ cmp_32(R0, 0);
|
||||
__ mov(R0, R6);
|
||||
__ mov(R1, altFP_7_11);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1054,7 +1054,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
|
||||
args->append(rank);
|
||||
args->append(varargs);
|
||||
LIR_Opr reg = result_register_for(x->type());
|
||||
__ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
|
||||
__ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
|
||||
LIR_OprFact::illegalOpr, reg, args, info);
|
||||
|
||||
LIR_Opr result = rlock_result(x);
|
||||
@ -1083,7 +1083,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
CodeStub* stub;
|
||||
if (x->is_incompatible_class_change_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
|
||||
LIR_OprFact::illegalOpr, info_for_exception);
|
||||
} else if (x->is_invokespecial_receiver_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
@ -1091,7 +1091,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
Deoptimization::Reason_class_check,
|
||||
Deoptimization::Action_none);
|
||||
} else {
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id,
|
||||
LIR_OprFact::illegalOpr, info_for_exception);
|
||||
}
|
||||
|
||||
|
||||
@ -195,8 +195,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp2, obj);
|
||||
ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
|
||||
tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(tmp2, Address(tmp2, Klass::misc_flags_offset()));
|
||||
tst(tmp2, KlassFlags::_misc_is_value_based_class);
|
||||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -65,7 +65,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
|
||||
reset_last_Java_frame(Rtemp);
|
||||
|
||||
assert(frame_size() != no_frame_size, "frame must be fixed");
|
||||
if (_stub_id != Runtime1::forward_exception_id) {
|
||||
if (_stub_id != (int)C1StubId::forward_exception_id) {
|
||||
ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
|
||||
}
|
||||
|
||||
@ -81,10 +81,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
|
||||
// Check for pending exception
|
||||
// unpack_with_exception_in_tls path is taken through
|
||||
// Runtime1::exception_handler_for_pc
|
||||
if (_stub_id != Runtime1::forward_exception_id) {
|
||||
if (_stub_id != (int)C1StubId::forward_exception_id) {
|
||||
assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
|
||||
cmp(R3, 0);
|
||||
jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
|
||||
jump(Runtime1::entry_for(C1StubId::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// Should not have pending exception in forward_exception stub
|
||||
@ -280,7 +280,7 @@ static void restore_sp_for_method_handle(StubAssembler* sasm) {
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) {
|
||||
__ block_comment("generate_handle_exception");
|
||||
|
||||
bool save_fpu_registers = false;
|
||||
@ -290,7 +290,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
OopMap* oop_map = nullptr;
|
||||
|
||||
switch (id) {
|
||||
case forward_exception_id: {
|
||||
case C1StubId::forward_exception_id: {
|
||||
save_fpu_registers = HaveVFP;
|
||||
oop_map = generate_oop_map(sasm);
|
||||
__ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
|
||||
@ -299,14 +299,14 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
__ str(zero, Address(Rthread, Thread::pending_exception_offset()));
|
||||
break;
|
||||
}
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
save_fpu_registers = HaveVFP;
|
||||
// fall-through
|
||||
case handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
// At this point all registers MAY be live.
|
||||
oop_map = save_live_registers(sasm, save_fpu_registers);
|
||||
break;
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
// At this point all registers except exception oop (R4/R19) and
|
||||
// exception pc (R5/R20) are dead.
|
||||
oop_map = save_live_registers(sasm); // TODO it's not required to save all registers
|
||||
@ -328,13 +328,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
// Restore the registers that were saved at the beginning, remove
|
||||
// frame and jump to the exception handler.
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
restore_live_registers(sasm, save_fpu_registers);
|
||||
// Note: the restore live registers includes the jump to LR (patched to R0)
|
||||
break;
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
restore_live_registers_without_return(sasm); // must not jump immediately to handler
|
||||
restore_sp_for_method_handle(sasm);
|
||||
__ ret();
|
||||
@ -403,7 +403,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
const bool must_gc_arguments = true;
|
||||
const bool dont_gc_arguments = false;
|
||||
|
||||
@ -411,16 +411,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
bool save_fpu_registers = HaveVFP;
|
||||
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
{
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
// does not return on ARM
|
||||
}
|
||||
break;
|
||||
|
||||
case new_instance_id:
|
||||
case fast_new_instance_id:
|
||||
case fast_new_instance_init_check_id:
|
||||
case C1StubId::new_instance_id:
|
||||
case C1StubId::fast_new_instance_id:
|
||||
case C1StubId::fast_new_instance_init_check_id:
|
||||
{
|
||||
const Register result = R0;
|
||||
const Register klass = R1;
|
||||
@ -436,7 +436,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case counter_overflow_id:
|
||||
case C1StubId::counter_overflow_id:
|
||||
{
|
||||
OopMap* oop_map = save_live_registers(sasm);
|
||||
__ ldr(R1, Address(SP, arg1_offset));
|
||||
@ -448,10 +448,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case new_type_array_id:
|
||||
case new_object_array_id:
|
||||
case C1StubId::new_type_array_id:
|
||||
case C1StubId::new_object_array_id:
|
||||
{
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
__ set_info("new_type_array", dont_gc_arguments);
|
||||
} else {
|
||||
__ set_info("new_object_array", dont_gc_arguments);
|
||||
@ -463,7 +463,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
|
||||
OopMap* map = save_live_registers(sasm);
|
||||
int call_offset;
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
|
||||
} else {
|
||||
call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
|
||||
@ -477,7 +477,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case new_multi_array_id:
|
||||
case C1StubId::new_multi_array_id:
|
||||
{
|
||||
__ set_info("new_multi_array", dont_gc_arguments);
|
||||
|
||||
@ -500,15 +500,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case register_finalizer_id:
|
||||
case C1StubId::register_finalizer_id:
|
||||
{
|
||||
__ set_info("register_finalizer", dont_gc_arguments);
|
||||
|
||||
// Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
|
||||
// Do not call runtime if has_finalizer flag is not set
|
||||
__ load_klass(Rtemp, R0);
|
||||
__ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
|
||||
__ ldrb(Rtemp, Address(Rtemp, Klass::misc_flags_offset()));
|
||||
|
||||
__ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
|
||||
__ tst(Rtemp, KlassFlags::_misc_has_finalizer);
|
||||
__ bx(LR, eq);
|
||||
|
||||
// Call VM
|
||||
@ -521,78 +521,78 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_range_check_failed_id:
|
||||
case C1StubId::throw_range_check_failed_id:
|
||||
{
|
||||
__ set_info("range_check_failed", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_index_exception_id:
|
||||
case C1StubId::throw_index_exception_id:
|
||||
{
|
||||
__ set_info("index_range_check_failed", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_div0_exception_id:
|
||||
case C1StubId::throw_div0_exception_id:
|
||||
{
|
||||
__ set_info("throw_div0_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_null_pointer_exception_id:
|
||||
case C1StubId::throw_null_pointer_exception_id:
|
||||
{
|
||||
__ set_info("throw_null_pointer_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
{
|
||||
__ set_info("handle_exception", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
{
|
||||
__ set_info("handle_exception_from_callee", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case unwind_exception_id:
|
||||
case C1StubId::unwind_exception_id:
|
||||
{
|
||||
__ set_info("unwind_exception", dont_gc_arguments);
|
||||
generate_unwind_exception(sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_array_store_exception_id:
|
||||
case C1StubId::throw_array_store_exception_id:
|
||||
{
|
||||
__ set_info("throw_array_store_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_class_cast_exception_id:
|
||||
case C1StubId::throw_class_cast_exception_id:
|
||||
{
|
||||
__ set_info("throw_class_cast_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_incompatible_class_change_error_id:
|
||||
case C1StubId::throw_incompatible_class_change_error_id:
|
||||
{
|
||||
__ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case slow_subtype_check_id:
|
||||
case C1StubId::slow_subtype_check_id:
|
||||
{
|
||||
// (in) R0 - sub, destroyed,
|
||||
// (in) R1 - super, not changed
|
||||
@ -625,10 +625,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorenter_nofpu_id:
|
||||
case C1StubId::monitorenter_nofpu_id:
|
||||
save_fpu_registers = false;
|
||||
// fall through
|
||||
case monitorenter_id:
|
||||
case C1StubId::monitorenter_id:
|
||||
{
|
||||
__ set_info("monitorenter", dont_gc_arguments);
|
||||
const Register obj = R1;
|
||||
@ -643,10 +643,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorexit_nofpu_id:
|
||||
case C1StubId::monitorexit_nofpu_id:
|
||||
save_fpu_registers = false;
|
||||
// fall through
|
||||
case monitorexit_id:
|
||||
case C1StubId::monitorexit_id:
|
||||
{
|
||||
__ set_info("monitorexit", dont_gc_arguments);
|
||||
const Register lock = R1;
|
||||
@ -659,7 +659,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case deoptimize_id:
|
||||
case C1StubId::deoptimize_id:
|
||||
{
|
||||
__ set_info("deoptimize", dont_gc_arguments);
|
||||
OopMap* oop_map = save_live_registers(sasm);
|
||||
@ -675,35 +675,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case access_field_patching_id:
|
||||
case C1StubId::access_field_patching_id:
|
||||
{
|
||||
__ set_info("access_field_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_klass_patching_id:
|
||||
case C1StubId::load_klass_patching_id:
|
||||
{
|
||||
__ set_info("load_klass_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_appendix_patching_id:
|
||||
case C1StubId::load_appendix_patching_id:
|
||||
{
|
||||
__ set_info("load_appendix_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_mirror_patching_id:
|
||||
case C1StubId::load_mirror_patching_id:
|
||||
{
|
||||
__ set_info("load_mirror_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case predicate_failed_trap_id:
|
||||
case C1StubId::predicate_failed_trap_id:
|
||||
{
|
||||
__ set_info("predicate_failed_trap", dont_gc_arguments);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -86,8 +86,8 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
ldr_u32(Rscratch, Address(Rscratch, Klass::access_flags_offset()));
|
||||
tst(Rscratch, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(Rscratch, Address(Rscratch, Klass::misc_flags_offset()));
|
||||
tst(Rscratch, KlassFlags::_misc_is_value_based_class);
|
||||
b(done, ne);
|
||||
}
|
||||
|
||||
|
||||
@ -39,8 +39,10 @@
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/g1/c1/g1BarrierSetC1.hpp"
|
||||
#endif
|
||||
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
#define __ masm->
|
||||
|
||||
#ifdef PRODUCT
|
||||
@ -106,70 +108,87 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
#endif // !R9_IS_SCRATCHED
|
||||
}
|
||||
|
||||
static void generate_queue_test_and_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
|
||||
const Register thread, const Register value, const Register temp1, const Register temp2) {
|
||||
assert_different_registers(value, temp1, temp2);
|
||||
// Can we store original value in the thread's buffer?
|
||||
// (The index field is typed as size_t.)
|
||||
__ ldr(temp1, Address(thread, in_bytes(index_offset))); // temp1 := *(index address)
|
||||
__ cbz(temp1, runtime); // jump to runtime if index == 0 (full buffer)
|
||||
// The buffer is not full, store value into it.
|
||||
__ sub(temp1, temp1, wordSize); // temp1 := next index
|
||||
__ str(temp1, Address(thread, in_bytes(index_offset))); // *(index address) := next index
|
||||
__ ldr(temp2, Address(thread, in_bytes(buffer_offset))); // temp2 := buffer address
|
||||
// Record the previous value
|
||||
__ str(value, Address(temp2, temp1)); // *(buffer address + next index) := value
|
||||
}
|
||||
|
||||
static void generate_pre_barrier_fast_path(MacroAssembler* masm,
|
||||
const Register thread,
|
||||
const Register tmp1) {
|
||||
Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
|
||||
// Is marking active?
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
|
||||
__ ldrb(tmp1, in_progress);
|
||||
}
|
||||
|
||||
static void generate_pre_barrier_slow_path(MacroAssembler* masm,
|
||||
const Register obj,
|
||||
const Register pre_val,
|
||||
const Register thread,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
Label& done,
|
||||
Label& runtime) {
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(obj, 0));
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
|
||||
generate_queue_test_and_insertion(masm,
|
||||
G1ThreadLocalData::satb_mark_queue_index_offset(),
|
||||
G1ThreadLocalData::satb_mark_queue_buffer_offset(),
|
||||
runtime,
|
||||
thread, pre_val, tmp1, tmp2);
|
||||
__ b(done);
|
||||
}
|
||||
|
||||
// G1 pre-barrier.
|
||||
// Blows all volatile registers R0-R3, Rtemp, LR).
|
||||
// If store_addr != noreg, then previous value is loaded from [store_addr];
|
||||
// in such case store_addr and new_val registers are preserved;
|
||||
// Blows all volatile registers R0-R3, LR).
|
||||
// If obj != noreg, then previous value is loaded from [obj];
|
||||
// in such case obj and pre_val registers is preserved;
|
||||
// otherwise pre_val register is preserved.
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2) {
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
if (store_addr != noreg) {
|
||||
assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
|
||||
} else {
|
||||
assert (new_val == noreg, "should be");
|
||||
assert_different_registers(pre_val, tmp1, tmp2, noreg);
|
||||
}
|
||||
assert_different_registers(obj, pre_val, tmp1, tmp2, noreg);
|
||||
|
||||
Address in_progress(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
|
||||
Address index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
|
||||
Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
|
||||
|
||||
// Is marking active?
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
|
||||
__ ldrb(tmp1, in_progress);
|
||||
generate_pre_barrier_fast_path(masm, Rthread, tmp1);
|
||||
// If marking is not active (*(mark queue active address) == 0), jump to done
|
||||
__ cbz(tmp1, done);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (store_addr != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(store_addr, 0));
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
|
||||
__ ldr(tmp1, index); // tmp1 := *index_adr
|
||||
__ ldr(tmp2, buffer);
|
||||
|
||||
__ subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
|
||||
__ b(runtime, lt); // If negative, goto runtime
|
||||
|
||||
__ str(tmp1, index); // *index_adr := tmp1
|
||||
|
||||
// Record the previous value
|
||||
__ str(pre_val, Address(tmp2, tmp1));
|
||||
__ b(done);
|
||||
generate_pre_barrier_slow_path(masm, obj, pre_val, Rthread, tmp1, tmp2, done, runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
// save the live input values
|
||||
if (store_addr != noreg) {
|
||||
// avoid raw_push to support any ordering of store_addr and new_val
|
||||
__ push(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ push(pre_val);
|
||||
RegisterSet set = RegisterSet(pre_val) | RegisterSet(R0, R3) | RegisterSet(R12);
|
||||
// save the live input values
|
||||
if (obj != noreg) {
|
||||
// avoid raw_push to support any ordering of store_addr and pre_val
|
||||
set = set | RegisterSet(obj);
|
||||
}
|
||||
|
||||
__ push(set);
|
||||
|
||||
if (pre_val != R0) {
|
||||
__ mov(R0, pre_val);
|
||||
}
|
||||
@ -177,33 +196,17 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
|
||||
|
||||
if (store_addr != noreg) {
|
||||
__ pop(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ pop(pre_val);
|
||||
}
|
||||
|
||||
__ pop(set);
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
// G1 post-barrier.
|
||||
// Blows all volatile registers R0-R3, Rtemp, LR).
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
|
||||
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
|
||||
Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
|
||||
|
||||
BarrierSet* bs = BarrierSet::barrier_set();
|
||||
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
|
||||
CardTable* ct = ctbs->card_table();
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
static void generate_post_barrier_fast_path(MacroAssembler* masm,
|
||||
const Register store_addr,
|
||||
const Register new_val,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
Label& done,
|
||||
bool new_val_may_be_null) {
|
||||
// Does store cross heap regions?
|
||||
|
||||
__ eor(tmp1, store_addr, new_val);
|
||||
@ -211,22 +214,31 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
__ b(done, eq);
|
||||
|
||||
// crosses regions, storing null?
|
||||
|
||||
__ cbz(new_val, done);
|
||||
|
||||
if (new_val_may_be_null) {
|
||||
__ cbz(new_val, done);
|
||||
}
|
||||
// storing region crossing non-null, is card already dirty?
|
||||
const Register card_addr = tmp1;
|
||||
|
||||
__ mov_address(tmp2, (address)ct->byte_map_base());
|
||||
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
__ mov_address(tmp2, (address)ct->card_table()->byte_map_base());
|
||||
__ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift()));
|
||||
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cmp(tmp2, (int)G1CardTable::g1_young_card_val());
|
||||
__ b(done, eq);
|
||||
}
|
||||
|
||||
static void generate_post_barrier_slow_path(MacroAssembler* masm,
|
||||
const Register thread,
|
||||
const Register tmp1,
|
||||
const Register tmp2,
|
||||
const Register tmp3,
|
||||
Label& done,
|
||||
Label& runtime) {
|
||||
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
|
||||
|
||||
assert(CardTable::dirty_card_val() == 0, "adjust this code");
|
||||
// card_addr is loaded by generate_post_barrier_fast_path
|
||||
const Register card_addr = tmp1;
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cbz(tmp2, done);
|
||||
|
||||
@ -234,29 +246,139 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
// dirty card and log.
|
||||
|
||||
__ strb(__ zero_register(tmp2), Address(card_addr));
|
||||
|
||||
__ ldr(tmp2, queue_index);
|
||||
__ ldr(tmp3, buffer);
|
||||
|
||||
__ subs(tmp2, tmp2, wordSize);
|
||||
__ b(runtime, lt); // go to runtime if now negative
|
||||
|
||||
__ str(tmp2, queue_index);
|
||||
|
||||
__ str(card_addr, Address(tmp3, tmp2));
|
||||
generate_queue_test_and_insertion(masm,
|
||||
G1ThreadLocalData::dirty_card_queue_index_offset(),
|
||||
G1ThreadLocalData::dirty_card_queue_buffer_offset(),
|
||||
runtime,
|
||||
thread, card_addr, tmp2, tmp3);
|
||||
__ b(done);
|
||||
}
|
||||
|
||||
|
||||
// G1 post-barrier.
|
||||
// Blows all volatile registers R0-R3, LR).
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
|
||||
// If card is young, jump to done
|
||||
// card_addr and card are loaded by generate_post_barrier_fast_path
|
||||
const Register card = tmp2;
|
||||
const Register card_addr = tmp1;
|
||||
__ b(done, eq);
|
||||
generate_post_barrier_slow_path(masm, Rthread, card_addr, tmp2, tmp3, done, runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
RegisterSet set = RegisterSet(store_addr) | RegisterSet(R0, R3) | RegisterSet(R12);
|
||||
__ push(set);
|
||||
|
||||
if (card_addr != R0) {
|
||||
__ mov(R0, card_addr);
|
||||
}
|
||||
__ mov(R1, Rthread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);
|
||||
|
||||
__ pop(set);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
#if defined(COMPILER2)
|
||||
|
||||
static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path, Register tmp1) {
|
||||
SaveLiveRegisters save_registers(masm, stub);
|
||||
if (c_rarg0 != arg) {
|
||||
__ mov(c_rarg0, arg);
|
||||
}
|
||||
__ mov(c_rarg1, Rthread);
|
||||
__ call_VM_leaf(runtime_path, R0, R1);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* stub) {
|
||||
assert(thread == Rthread, "must be");
|
||||
assert_different_registers(obj, pre_val, tmp1, tmp2);
|
||||
assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
|
||||
|
||||
stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);
|
||||
|
||||
generate_pre_barrier_fast_path(masm, thread, tmp1);
|
||||
// If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
|
||||
__ cbnz(tmp1, *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Register obj = stub->obj();
|
||||
Register pre_val = stub->pre_val();
|
||||
Register thread = stub->thread();
|
||||
Register tmp1 = stub->tmp1();
|
||||
Register tmp2 = stub->tmp2();
|
||||
|
||||
__ bind(*stub->entry());
|
||||
generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, *stub->continuation(), runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), tmp1);
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3,
|
||||
G1PostBarrierStubC2* stub) {
|
||||
assert(thread == Rthread, "must be");
|
||||
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
|
||||
|
||||
stub->initialize_registers(thread, tmp1, tmp2, tmp3);
|
||||
|
||||
bool new_val_may_be_null = (stub->barrier_data() & G1C2BarrierPostNotNull) == 0;
|
||||
generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, *stub->continuation(), new_val_may_be_null);
|
||||
// If card is not young, jump to stub (slow path)
|
||||
__ b(*stub->entry(), ne);
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Register thread = stub->thread();
|
||||
Register tmp1 = stub->tmp1(); // tmp1 holds the card address.
|
||||
Register tmp2 = stub->tmp2();
|
||||
Register tmp3 = stub->tmp3();
|
||||
|
||||
__ bind(*stub->entry());
|
||||
generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, tmp3, *stub->continuation(), runtime);
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, tmp1, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp2);
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
@ -268,7 +390,7 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
|
||||
if (on_oop && on_reference) {
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
|
||||
g1_write_barrier_pre(masm, noreg, dst, tmp1, tmp2);
|
||||
}
|
||||
}
|
||||
|
||||
@ -295,7 +417,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
|
||||
}
|
||||
|
||||
if (needs_pre_barrier) {
|
||||
g1_write_barrier_pre(masm, store_addr, new_val, tmp1, tmp2, tmp3);
|
||||
g1_write_barrier_pre(masm, store_addr, tmp3 /*pre_val*/, tmp1, tmp2);
|
||||
}
|
||||
|
||||
if (is_null) {
|
||||
|
||||
@ -33,6 +33,8 @@ class LIR_Assembler;
|
||||
class StubAssembler;
|
||||
class G1PreBarrierStub;
|
||||
class G1PostBarrierStub;
|
||||
class G1PreBarrierStubC2;
|
||||
class G1PostBarrierStubC2;
|
||||
|
||||
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
|
||||
protected:
|
||||
@ -43,7 +45,6 @@ protected:
|
||||
|
||||
void g1_write_barrier_pre(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2);
|
||||
@ -70,6 +71,29 @@ public:
|
||||
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
|
||||
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
|
||||
#endif
|
||||
|
||||
#ifdef COMPILER2
|
||||
void g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* c2_stub);
|
||||
void generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const;
|
||||
void g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register thread,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3,
|
||||
G1PostBarrierStubC2* c2_stub);
|
||||
void generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const;
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
|
||||
|
||||
201
src/hotspot/cpu/arm/gc/g1/g1_arm.ad
Normal file
201
src/hotspot/cpu/arm/gc/g1/g1_arm.ad
Normal file
@ -0,0 +1,201 @@
|
||||
//
|
||||
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "gc/g1/g1BarrierSetAssembler_arm.hpp"
|
||||
#include "gc/g1/g1BarrierSetRuntime.hpp"
|
||||
|
||||
static void write_barrier_pre(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
RegSet preserve = RegSet(),
|
||||
RegSet no_preserve = RegSet()) {
|
||||
if (!G1PreBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
|
||||
for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->preserve(*reg);
|
||||
}
|
||||
for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->dont_preserve(*reg);
|
||||
}
|
||||
g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, Rthread, tmp1, tmp2, stub);
|
||||
}
|
||||
|
||||
static void write_barrier_post(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
if (!G1PostBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
|
||||
g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, Rthread, tmp1, tmp2, tmp3, stub);
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
instruct g1StoreP(indirect mem, iRegP src, iRegP tmp1, iRegP tmp2, iRegP tmp3, flagsReg icc)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL icc);
|
||||
ins_cost(2 * (MEMORY_REF_COST + BRANCH_COST));
|
||||
format %{ "sd $src, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$tmp1$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ str($src$$Register, Address($mem$$Register));
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
$tmp3$$Register /* tmp3 */);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndSwapP(iRegI res, indirect mem, iRegP newval, iRegP tmp1, iRegP tmp2, iRegP tmp3, iRegP oldval, flagsReg ccr )
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(KILL ccr, TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
||||
ins_cost(4 * (MEMORY_REF_COST + BRANCH_COST));
|
||||
format %{ "loop: \n\t"
|
||||
"LDREX $tmp1, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
|
||||
"CMP $tmp1, $oldval\n\t"
|
||||
"STREX.eq $tmp1, $newval, $mem\n\t"
|
||||
"MOV.ne $tmp1, 0 \n\t"
|
||||
"EORS.eq $tmp1,$tmp1, 1 \n\t"
|
||||
"B.eq loop \n\t"
|
||||
"MOV $res, $tmp1" %}
|
||||
ins_encode %{
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
assert_different_registers($oldval$$Register, $mem$$Register);
|
||||
assert_different_registers($newval$$Register, $mem$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
noreg /* obj */,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp2$$Register /* tmp1 */,
|
||||
$tmp3$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
Label loop;
|
||||
__ bind(loop);
|
||||
__ ldrex($tmp1$$Register,$mem$$Address);
|
||||
__ cmp($tmp1$$Register, $oldval$$Register);
|
||||
__ strex($tmp1$$Register, $newval$$Register, $mem$$Address, eq);
|
||||
__ mov($tmp1$$Register, 0, ne);
|
||||
__ eors($tmp1$$Register, $tmp1$$Register, 1, eq);
|
||||
__ b(loop, eq);
|
||||
__ mov($res$$Register, $tmp1$$Register);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$newval$$Register /* new_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
$tmp3$$Register /* tmp3 */);
|
||||
%}
|
||||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
|
||||
instruct g1GetAndSetP(indirect mem, iRegP newval, iRegP tmp1, iRegP tmp2, iRegP tmp3, iRegP preval, flagsReg ccr)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set preval (GetAndSetP mem newval));
|
||||
effect(KILL ccr, TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
||||
ins_cost(4 * (MEMORY_REF_COST + BRANCH_COST));
|
||||
format %{ "loop: \n\t"
|
||||
"LDREX $preval, $mem\n\t"
|
||||
"STREX $tmp1, $newval, $mem\n\t"
|
||||
"CMP $tmp1, 0 \n\t"
|
||||
"B.ne loop \n\t" %}
|
||||
ins_encode %{
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
assert_different_registers($mem$$Register, $newval$$Register);
|
||||
write_barrier_pre(masm, this,
|
||||
$mem$$Register /* obj */,
|
||||
$preval$$Register /* pre_val (as a temporary register) */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
|
||||
Label loop;
|
||||
__ bind(loop);
|
||||
__ ldrex($preval$$Register,$mem$$Address);
|
||||
__ strex($tmp1$$Register, $newval$$Register, $mem$$Address);
|
||||
__ cmp($tmp1$$Register, 0);
|
||||
__ b(loop, ne);
|
||||
write_barrier_post(masm, this,
|
||||
$mem$$Register /* store_addr */,
|
||||
$newval$$Register /* new_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */,
|
||||
$tmp3$$Register /* tmp3 */);
|
||||
%}
|
||||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
instruct g1LoadP(iRegP dst, indirect mem, iRegP tmp1, iRegP tmp2, flagsReg icc)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL icc);
|
||||
ins_cost(MEMORY_REF_COST + BRANCH_COST);
|
||||
format %{ "ld $dst, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
__ ldr($dst$$Register, Address($mem$$Register));
|
||||
write_barrier_pre(masm, this,
|
||||
noreg /* obj */,
|
||||
$dst$$Register /* pre_val */,
|
||||
$tmp1$$Register /* tmp1 */,
|
||||
$tmp2$$Register /* tmp2 */);
|
||||
%}
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
@ -31,6 +31,10 @@
|
||||
#include "runtime/javaThread.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
#ifdef COMPILER2
|
||||
#include "gc/shared/c2/barrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#define __ masm->
|
||||
|
||||
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
@ -206,7 +210,57 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
|
||||
#ifdef COMPILER2
|
||||
|
||||
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
|
||||
Unimplemented(); // This must be implemented to support late barrier expansion.
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (!vm_reg->is_valid()){
|
||||
// skip APSR and FPSCR
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
return opto_reg;
|
||||
}
|
||||
|
||||
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
|
||||
// Record registers that needs to be saved/restored
|
||||
RegMaskIterator rmi(stub->preserve_set());
|
||||
while (rmi.has_next()) {
|
||||
const OptoReg::Name opto_reg = rmi.next();
|
||||
if (OptoReg::is_reg(opto_reg)) {
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_Register()) {
|
||||
gp_regs += RegSet::of(vm_reg->as_Register());
|
||||
} else if (vm_reg->is_FloatRegister()) {
|
||||
fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
|
||||
} else {
|
||||
fatal("Unknown register type");
|
||||
}
|
||||
}
|
||||
}
|
||||
// Remove C-ABI SOE registers that will be updated
|
||||
gp_regs -= RegSet::range(R4, R11) + RegSet::of(R13, R15);
|
||||
|
||||
// Remove C-ABI SOE fp registers
|
||||
fp_regs -= FloatRegSet::range(S16, S31);
|
||||
}
|
||||
|
||||
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
|
||||
: masm(masm),
|
||||
gp_regs(),
|
||||
fp_regs() {
|
||||
// Figure out what registers to save/restore
|
||||
initialize(stub);
|
||||
|
||||
// Save registers
|
||||
if (gp_regs.size() > 0) __ push(RegisterSet::from(gp_regs));
|
||||
if (fp_regs.size() > 0) __ fpush(FloatRegisterSet::from(fp_regs));
|
||||
}
|
||||
|
||||
SaveLiveRegisters::~SaveLiveRegisters() {
|
||||
// Restore registers
|
||||
if (fp_regs.size() > 0) __ fpop(FloatRegisterSet::from(fp_regs));
|
||||
if (gp_regs.size() > 0) __ pop(RegisterSet::from(gp_regs));
|
||||
}
|
||||
#endif // COMPILER2
|
||||
|
||||
@ -31,7 +31,9 @@
|
||||
#ifdef COMPILER2
|
||||
#include "code/vmreg.hpp"
|
||||
#include "opto/optoreg.hpp"
|
||||
#include "opto/regmask.hpp"
|
||||
|
||||
class BarrierStubC2;
|
||||
class Node;
|
||||
#endif // COMPILER2
|
||||
|
||||
@ -69,4 +71,26 @@ public:
|
||||
#endif // COMPILER2
|
||||
};
|
||||
|
||||
#ifdef COMPILER2
|
||||
// This class saves and restores the registers that need to be preserved across
|
||||
// the runtime call represented by a given C2 barrier stub. Use as follows:
|
||||
// {
|
||||
// SaveLiveRegisters save(masm, stub);
|
||||
// ..
|
||||
// __ bl(...);
|
||||
// ..
|
||||
// }
|
||||
class SaveLiveRegisters {
|
||||
private:
|
||||
MacroAssembler* const masm;
|
||||
RegSet gp_regs;
|
||||
FloatRegSet fp_regs;
|
||||
|
||||
public:
|
||||
void initialize(BarrierStubC2* stub);
|
||||
SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub);
|
||||
~SaveLiveRegisters();
|
||||
};
|
||||
|
||||
#endif // COMPILER2
|
||||
#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -909,8 +909,8 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(R0, Robj);
|
||||
ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
|
||||
tst(R0, JVM_ACC_IS_VALUE_BASED_CLASS);
|
||||
ldrb(R0, Address(R0, Klass::misc_flags_offset()));
|
||||
tst(R0, KlassFlags::_misc_is_value_based_class);
|
||||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
|
||||
@ -303,6 +303,31 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
|
||||
static const int max_fpr;
|
||||
};
|
||||
|
||||
typedef AbstractRegSet<Register> RegSet;
|
||||
typedef AbstractRegSet<FloatRegister> FloatRegSet;
|
||||
|
||||
template <>
|
||||
inline Register AbstractRegSet<Register>::first() {
|
||||
if (_bitset == 0) { return noreg; }
|
||||
return as_Register(count_trailing_zeros(_bitset));
|
||||
}
|
||||
|
||||
|
||||
template <>
|
||||
inline FloatRegister AbstractRegSet<FloatRegister>::first() {
|
||||
uint32_t first = _bitset & -_bitset;
|
||||
return first ? as_FloatRegister(exact_log2(first)) : fnoreg;
|
||||
}
|
||||
|
||||
template <>
|
||||
inline FloatRegister AbstractRegSet<FloatRegister>::last() {
|
||||
if (_bitset == 0) { return fnoreg; }
|
||||
int last = max_size() - 1 - count_leading_zeros(_bitset);
|
||||
return as_FloatRegister(last);
|
||||
}
|
||||
|
||||
|
||||
|
||||
class VFPSystemRegisterImpl;
|
||||
typedef VFPSystemRegisterImpl* VFPSystemRegister;
|
||||
class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
|
||||
@ -38,6 +38,7 @@
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/safepointMechanism.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/timerTrace.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
@ -1360,7 +1361,8 @@ uint SharedRuntime::out_preserve_stack_slots() {
|
||||
//------------------------------generate_deopt_blob----------------------------
|
||||
void SharedRuntime::generate_deopt_blob() {
|
||||
ResourceMark rm;
|
||||
CodeBuffer buffer("deopt_blob", 1024, 1024);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
|
||||
CodeBuffer buffer(name, 1024, 1024);
|
||||
int frame_size_in_words;
|
||||
OopMapSet* oop_maps;
|
||||
int reexecute_offset;
|
||||
@ -1601,15 +1603,17 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// setup oopmap, and calls safepoint code to stop the compiled code for
|
||||
// a safepoint.
|
||||
//
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
|
||||
assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
|
||||
assert(is_polling_page_id(id), "expected a polling page stub id");
|
||||
|
||||
ResourceMark rm;
|
||||
CodeBuffer buffer("handler_blob", 256, 256);
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 256, 256);
|
||||
int frame_size_words;
|
||||
OopMapSet* oop_maps;
|
||||
|
||||
bool cause_return = (poll_type == POLL_AT_RETURN);
|
||||
bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
|
||||
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
address start = __ pc();
|
||||
@ -1671,10 +1675,12 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
|
||||
return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
|
||||
}
|
||||
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
|
||||
assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
|
||||
assert(is_resolve_id(id), "expected a resolve stub id");
|
||||
|
||||
ResourceMark rm;
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 1000, 512);
|
||||
int frame_size_words;
|
||||
OopMapSet *oop_maps;
|
||||
@ -1733,7 +1739,11 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
|
||||
// Continuation point for throwing of implicit exceptions that are not handled in
|
||||
// the current activation. Fabricates an exception oop and initiates normal
|
||||
// exception dispatching in this frame.
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
|
||||
assert(is_throw_id(id), "expected a throw stub id");
|
||||
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
|
||||
int insts_size = 128;
|
||||
int locs_size = 32;
|
||||
|
||||
@ -1793,7 +1803,8 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
framesize // inclusive of return address
|
||||
};
|
||||
|
||||
CodeBuffer code("jfr_write_checkpoint", 512, 64);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
|
||||
CodeBuffer code(name, 512, 64);
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
address start = __ pc();
|
||||
@ -1818,7 +1829,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
oop_maps->add_gc_map(frame_complete, map);
|
||||
|
||||
RuntimeStub* stub =
|
||||
RuntimeStub::new_runtime_stub(code.name(),
|
||||
RuntimeStub::new_runtime_stub(name,
|
||||
&code,
|
||||
frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
@ -1836,7 +1847,8 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
framesize // inclusive of return address
|
||||
};
|
||||
|
||||
CodeBuffer code("jfr_return_lease", 512, 64);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
|
||||
CodeBuffer code(name, 512, 64);
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
address start = __ pc();
|
||||
@ -1858,7 +1870,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
oop_maps->add_gc_map(frame_complete, map);
|
||||
|
||||
RuntimeStub* stub =
|
||||
RuntimeStub::new_runtime_stub(code.name(),
|
||||
RuntimeStub::new_runtime_stub(name,
|
||||
&code,
|
||||
frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
|
||||
@ -175,6 +175,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
break;
|
||||
case Interpreter::java_lang_math_fmaD:
|
||||
case Interpreter::java_lang_math_fmaF:
|
||||
case Interpreter::java_lang_math_tanh:
|
||||
// TODO: Implement intrinsic
|
||||
break;
|
||||
default:
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -2494,8 +2494,8 @@ void TemplateTable::_return(TosState state) {
|
||||
assert(state == vtos, "only valid state");
|
||||
__ ldr(R1, aaddress(0));
|
||||
__ load_klass(Rtemp, R1);
|
||||
__ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
|
||||
__ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
|
||||
__ ldrb(Rtemp, Address(Rtemp, Klass::misc_flags_offset()));
|
||||
__ tbz(Rtemp, exact_log2(KlassFlags::_misc_has_finalizer), skip_register_finalizer);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
|
||||
|
||||
@ -3974,6 +3974,7 @@ void TemplateTable::_new() {
|
||||
// make sure klass is initialized
|
||||
// make sure klass is fully initialized
|
||||
__ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
|
||||
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
|
||||
__ cmp(Rtemp, InstanceKlass::fully_initialized);
|
||||
__ b(slow_case, ne);
|
||||
|
||||
|
||||
@ -25,7 +25,7 @@
|
||||
#include "prims/upcallLinker.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Symbol* signature,
|
||||
BasicType* out_sig_bt, int total_out_args,
|
||||
BasicType ret_type,
|
||||
jobject jabi, jobject jconv,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -68,7 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
//__ load_const_optimized(R0, a);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
|
||||
__ mtctr(R0);
|
||||
@ -79,8 +79,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
return;
|
||||
}
|
||||
|
||||
address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
|
||||
: Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
|
||||
address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(C1StubId::throw_index_exception_id)
|
||||
: Runtime1::entry_for(C1StubId::throw_range_check_failed_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
__ mtctr(R0);
|
||||
@ -109,7 +109,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
|
||||
|
||||
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
//__ load_const_optimized(R0, a);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
|
||||
__ mtctr(R0);
|
||||
@ -133,7 +133,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
|
||||
__ load_const_optimized(R0, md.value());
|
||||
__ std(R0, -8, R1_SP);
|
||||
|
||||
address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
|
||||
address a = Runtime1::entry_for(C1StubId::counter_overflow_id);
|
||||
//__ load_const_optimized(R0, a);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
|
||||
__ mtctr(R0);
|
||||
@ -150,7 +150,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
}
|
||||
__ bind(_entry);
|
||||
address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
|
||||
address stub = Runtime1::entry_for(C1StubId::throw_div0_exception_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
__ mtctr(R0);
|
||||
@ -165,9 +165,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
address a;
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
|
||||
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
|
||||
} else {
|
||||
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
|
||||
a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
}
|
||||
|
||||
if (ImplicitNullChecks || TrapBasedNullChecks) {
|
||||
@ -199,14 +199,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
|
||||
// Implementation of NewInstanceStub
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
|
||||
_result = result;
|
||||
_klass = klass;
|
||||
_klass_reg = klass_reg;
|
||||
_info = new CodeEmitInfo(info);
|
||||
assert(stub_id == Runtime1::new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_init_check_id,
|
||||
assert(stub_id == C1StubId::new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_id ||
|
||||
stub_id == C1StubId::fast_new_instance_init_check_id,
|
||||
"need new_instance id");
|
||||
_stub_id = stub_id;
|
||||
}
|
||||
@ -236,7 +236,7 @@ NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr re
|
||||
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
|
||||
address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
|
||||
address entry = Runtime1::entry_for(C1StubId::new_type_array_id);
|
||||
//__ load_const_optimized(R0, entry);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
|
||||
__ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
|
||||
@ -259,7 +259,7 @@ NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Op
|
||||
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
|
||||
address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
|
||||
address entry = Runtime1::entry_for(C1StubId::new_object_array_id);
|
||||
//__ load_const_optimized(R0, entry);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
|
||||
__ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
|
||||
@ -272,7 +272,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
|
||||
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorenter_id : C1StubId::monitorenter_nofpu_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
__ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
|
||||
@ -289,7 +289,7 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
|
||||
if (_compute_lock) {
|
||||
ce->monitor_address(_monitor_ix, _lock_reg);
|
||||
}
|
||||
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
|
||||
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorexit_id : C1StubId::monitorexit_nofpu_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
assert(_lock_reg->as_register() == R4_ARG2, "");
|
||||
@ -403,12 +403,12 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
address target = nullptr;
|
||||
relocInfo::relocType reloc_type = relocInfo::none;
|
||||
switch (_id) {
|
||||
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
|
||||
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
|
||||
case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
|
||||
case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
|
||||
reloc_type = relocInfo::metadata_type; break;
|
||||
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
|
||||
case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
|
||||
reloc_type = relocInfo::oop_type; break;
|
||||
case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
|
||||
case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
|
||||
reloc_type = relocInfo::oop_type; break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
@ -434,7 +434,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
|
||||
address stub = Runtime1::entry_for(C1StubId::deoptimize_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
__ mtctr(R0);
|
||||
|
||||
@ -176,7 +176,7 @@ int LIR_Assembler::emit_exception_handler() {
|
||||
}
|
||||
|
||||
int offset = code_offset();
|
||||
address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
|
||||
address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::handle_exception_from_callee_id));
|
||||
//__ load_const_optimized(R0, entry_point);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
|
||||
__ mtctr(R0);
|
||||
@ -222,7 +222,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
||||
}
|
||||
|
||||
// Dispatch to the unwind logic.
|
||||
address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
|
||||
address unwind_stub = Runtime1::entry_for(C1StubId::unwind_exception_id);
|
||||
//__ load_const_optimized(R0, unwind_stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
|
||||
if (preserve_exception) { __ mr(Rexception, Rexception_save); }
|
||||
@ -1800,8 +1800,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
|
||||
__ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
|
||||
add_call_info(pc_for_athrow_offset, info); // for exception handler
|
||||
|
||||
address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
|
||||
: Runtime1::handle_exception_nofpu_id);
|
||||
address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? C1StubId::handle_exception_id
|
||||
: C1StubId::handle_exception_nofpu_id);
|
||||
//__ load_const_optimized(R0, stub);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
|
||||
__ mtctr(R0);
|
||||
@ -1859,7 +1859,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ stw(R11_scratch1, simm16_offs, tmp);
|
||||
}
|
||||
#endif
|
||||
__ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
|
||||
__ call_c(copyfunc_addr, relocInfo::runtime_call_type);
|
||||
|
||||
__ nand(tmp, R3_RET, R3_RET);
|
||||
__ subf(length, tmp, length);
|
||||
@ -2001,7 +2001,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
|
||||
&cont, copyfunc_addr != nullptr ? ©func : &slow, nullptr);
|
||||
|
||||
address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
|
||||
address slow_stc = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
|
||||
//__ load_const_optimized(tmp, slow_stc, tmp2);
|
||||
__ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
|
||||
__ mtctr(tmp);
|
||||
@ -2057,7 +2057,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
int sco_offset = in_bytes(Klass::super_check_offset_offset());
|
||||
__ lwz(chk_off, sco_offset, super_k);
|
||||
|
||||
__ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
|
||||
__ call_c(copyfunc_addr, relocInfo::runtime_call_type);
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintC1Statistics) {
|
||||
@ -2181,7 +2181,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
// Arraycopy stubs takes a length in number of elements, so don't scale it.
|
||||
__ mr(len, length);
|
||||
__ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);
|
||||
__ call_c(entry, relocInfo::runtime_call_type);
|
||||
|
||||
if (stub != nullptr) {
|
||||
__ bind(*stub->continuation());
|
||||
@ -2274,6 +2274,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
}
|
||||
__ lbz(op->tmp1()->as_register(),
|
||||
in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
|
||||
// acquire barrier included in membar_storestore() which follows the allocation immediately.
|
||||
__ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
|
||||
}
|
||||
@ -2452,7 +2453,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ b(*success);
|
||||
} else {
|
||||
// Call out-of-line instance of __ check_klass_subtype_slow_path(...):
|
||||
address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
|
||||
address entry = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
|
||||
// Stub needs fixed registers (tmp1-3).
|
||||
Register original_k_RInfo = op->tmp1()->as_register();
|
||||
Register original_klass_RInfo = op->tmp2()->as_register();
|
||||
@ -2543,7 +2544,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, &done, &failure, nullptr);
|
||||
|
||||
// Call out-of-line instance of __ check_klass_subtype_slow_path(...):
|
||||
const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
|
||||
const address slow_path = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
|
||||
//__ load_const_optimized(R0, slow_path);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
|
||||
__ mtctr(R0);
|
||||
@ -2850,8 +2851,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
|
||||
void LIR_Assembler::rt_call(LIR_Opr result, address dest,
|
||||
const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
|
||||
// Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
|
||||
if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
|
||||
dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) {
|
||||
if (dest == Runtime1::entry_for(C1StubId::register_finalizer_id) ||
|
||||
dest == Runtime1::entry_for(C1StubId::new_multi_array_id )) {
|
||||
//__ load_const_optimized(R0, dest);
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
|
||||
__ mtctr(R0);
|
||||
@ -2862,7 +2863,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
|
||||
return;
|
||||
}
|
||||
|
||||
__ call_c_with_frame_resize(dest, /*no resizing*/ 0);
|
||||
__ call_c(dest, relocInfo::runtime_call_type);
|
||||
if (info != nullptr) {
|
||||
add_call_info_here(info);
|
||||
}
|
||||
|
||||
@ -1032,7 +1032,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
|
||||
args->append(rank);
|
||||
args->append(varargs);
|
||||
const LIR_Opr reg = result_register_for(x->type());
|
||||
__ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
|
||||
__ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
|
||||
LIR_OprFact::illegalOpr,
|
||||
reg, args, info);
|
||||
|
||||
@ -1067,7 +1067,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
|
||||
if (x->is_incompatible_class_change_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
|
||||
LIR_OprFact::illegalOpr, info_for_exception);
|
||||
} else if (x->is_invokespecial_receiver_check()) {
|
||||
assert(patching_info == nullptr, "can't patch this");
|
||||
@ -1075,7 +1075,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
Deoptimization::Reason_class_check,
|
||||
Deoptimization::Action_none);
|
||||
} else {
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
|
||||
stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
|
||||
}
|
||||
// Following registers are used by slow_subtype_check:
|
||||
LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
|
||||
|
||||
@ -86,13 +86,13 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
lwz(Rscratch, in_bytes(Klass::access_flags_offset()), Rscratch);
|
||||
testbitdi(CCR0, R0, Rscratch, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
|
||||
lbz(Rscratch, in_bytes(Klass::misc_flags_offset()), Rscratch);
|
||||
testbitdi(CCR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_int);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(Roop, Rmark, Rscratch, slow_int);
|
||||
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// ... and mark it unlocked.
|
||||
ori(Rmark, Rmark, markWord::unlocked_value);
|
||||
@ -293,7 +293,7 @@ void C1_MacroAssembler::initialize_object(
|
||||
if (CURRENT_ENV->dtrace_alloc_probes()) {
|
||||
Unimplemented();
|
||||
// assert(obj == O0, "must be");
|
||||
// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
|
||||
// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)),
|
||||
// relocInfo::runtime_call_type);
|
||||
}
|
||||
|
||||
@ -369,7 +369,7 @@ void C1_MacroAssembler::allocate_array(
|
||||
if (CURRENT_ENV->dtrace_alloc_probes()) {
|
||||
Unimplemented();
|
||||
//assert(obj == O0, "must be");
|
||||
//call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
|
||||
//call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)),
|
||||
// relocInfo::runtime_call_type);
|
||||
}
|
||||
|
||||
@ -398,20 +398,9 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
|
||||
if (TrapBasedNullChecks) { // SIGTRAP based
|
||||
trap_null_check(r);
|
||||
} else { // explicit
|
||||
//const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
|
||||
//const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
assert(Lnull != nullptr, "must have Label for explicit check");
|
||||
cmpdi(CCR0, r, 0);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
|
||||
}
|
||||
}
|
||||
|
||||
address C1_MacroAssembler::call_c_with_frame_resize(address dest, int frame_resize) {
|
||||
if (frame_resize) { resize_frame(-frame_resize, R0); }
|
||||
#if defined(ABI_ELFv2)
|
||||
address return_pc = call_c(dest, relocInfo::runtime_call_type);
|
||||
#else
|
||||
address return_pc = call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, dest), relocInfo::runtime_call_type);
|
||||
#endif
|
||||
if (frame_resize) { resize_frame(frame_resize, R0); }
|
||||
return return_pc;
|
||||
}
|
||||
|
||||
@ -89,6 +89,5 @@
|
||||
|
||||
void null_check(Register r, Label *Lnull = nullptr);
|
||||
|
||||
address call_c_with_frame_resize(address dest, int frame_resize);
|
||||
|
||||
#endif // CPU_PPC_C1_MACROASSEMBLER_PPC_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -62,7 +62,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
|
||||
// ARG1 must hold thread address.
|
||||
mr(R3_ARG1, R16_thread);
|
||||
|
||||
address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);
|
||||
address return_pc = call_c(entry_point);
|
||||
|
||||
reset_last_Java_frame();
|
||||
|
||||
@ -97,12 +97,12 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
|
||||
//load_const_optimized(R0, StubRoutines::forward_exception_entry());
|
||||
//mtctr(R0);
|
||||
//bctr();
|
||||
} else if (_stub_id == Runtime1::forward_exception_id) {
|
||||
} else if (_stub_id == (int)C1StubId::forward_exception_id) {
|
||||
should_not_reach_here();
|
||||
} else {
|
||||
// keep stub frame for next call_RT
|
||||
//load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
|
||||
add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
|
||||
//load_const_optimized(R0, Runtime1::entry_for(C1StubId::forward_exception_id));
|
||||
add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(C1StubId::forward_exception_id)));
|
||||
mtctr(R0);
|
||||
bctr();
|
||||
}
|
||||
@ -388,7 +388,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
return oop_maps;
|
||||
}
|
||||
|
||||
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
OopMapSet* oop_maps = nullptr;
|
||||
|
||||
// For better readability.
|
||||
@ -397,22 +397,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
|
||||
// Stub code & info for the different stubs.
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
{
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case new_instance_id:
|
||||
case fast_new_instance_id:
|
||||
case fast_new_instance_init_check_id:
|
||||
case C1StubId::new_instance_id:
|
||||
case C1StubId::fast_new_instance_id:
|
||||
case C1StubId::fast_new_instance_init_check_id:
|
||||
{
|
||||
if (id == new_instance_id) {
|
||||
if (id == C1StubId::new_instance_id) {
|
||||
__ set_info("new_instance", dont_gc_arguments);
|
||||
} else if (id == fast_new_instance_id) {
|
||||
} else if (id == C1StubId::fast_new_instance_id) {
|
||||
__ set_info("fast new_instance", dont_gc_arguments);
|
||||
} else {
|
||||
assert(id == fast_new_instance_init_check_id, "bad StubID");
|
||||
assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
|
||||
__ set_info("fast new_instance init check", dont_gc_arguments);
|
||||
}
|
||||
|
||||
@ -422,15 +422,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case counter_overflow_id:
|
||||
case C1StubId::counter_overflow_id:
|
||||
// Bci and method are on stack.
|
||||
oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
|
||||
break;
|
||||
|
||||
case new_type_array_id:
|
||||
case new_object_array_id:
|
||||
case C1StubId::new_type_array_id:
|
||||
case C1StubId::new_object_array_id:
|
||||
{
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
__ set_info("new_type_array", dont_gc_arguments);
|
||||
} else {
|
||||
__ set_info("new_object_array", dont_gc_arguments);
|
||||
@ -439,7 +439,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
#ifdef ASSERT
|
||||
// Assert object type is really an array of the proper kind.
|
||||
{
|
||||
int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
|
||||
int tag = (id == C1StubId::new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
|
||||
Label ok;
|
||||
__ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
|
||||
__ srawi(R0, R0, Klass::_lh_array_tag_shift);
|
||||
@ -453,7 +453,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
|
||||
// We don't support eden allocation.
|
||||
|
||||
if (id == new_type_array_id) {
|
||||
if (id == C1StubId::new_type_array_id) {
|
||||
oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
|
||||
} else {
|
||||
oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
|
||||
@ -461,7 +461,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case new_multi_array_id:
|
||||
case C1StubId::new_multi_array_id:
|
||||
{
|
||||
// R4: klass
|
||||
// R5: rank
|
||||
@ -471,7 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case register_finalizer_id:
|
||||
case C1StubId::register_finalizer_id:
|
||||
{
|
||||
__ set_info("register_finalizer", dont_gc_arguments);
|
||||
// This code is called via rt_call. Hence, caller-save registers have been saved.
|
||||
@ -479,8 +479,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
|
||||
// Load the klass and check the has finalizer flag.
|
||||
__ load_klass(t, R3_ARG1);
|
||||
__ lwz(t, in_bytes(Klass::access_flags_offset()), t);
|
||||
__ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
|
||||
__ lbz(t, in_bytes(Klass::misc_flags_offset()), t);
|
||||
__ testbitdi(CCR0, R0, t, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
// Return if has_finalizer bit == 0 (CR0.eq).
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
|
||||
@ -501,50 +501,50 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_range_check_failed_id:
|
||||
case C1StubId::throw_range_check_failed_id:
|
||||
{
|
||||
__ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
|
||||
oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_index_exception_id:
|
||||
case C1StubId::throw_index_exception_id:
|
||||
{
|
||||
__ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_div0_exception_id:
|
||||
case C1StubId::throw_div0_exception_id:
|
||||
{
|
||||
__ set_info("throw_div0_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_null_pointer_exception_id:
|
||||
case C1StubId::throw_null_pointer_exception_id:
|
||||
{
|
||||
__ set_info("throw_null_pointer_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
{
|
||||
__ set_info("handle_exception", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
{
|
||||
__ set_info("handle_exception_from_callee", dont_gc_arguments);
|
||||
oop_maps = generate_handle_exception(id, sasm);
|
||||
}
|
||||
break;
|
||||
|
||||
case unwind_exception_id:
|
||||
case C1StubId::unwind_exception_id:
|
||||
{
|
||||
const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/,
|
||||
Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/,
|
||||
@ -572,28 +572,28 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_array_store_exception_id:
|
||||
case C1StubId::throw_array_store_exception_id:
|
||||
{
|
||||
__ set_info("throw_array_store_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_class_cast_exception_id:
|
||||
case C1StubId::throw_class_cast_exception_id:
|
||||
{
|
||||
__ set_info("throw_class_cast_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
|
||||
}
|
||||
break;
|
||||
|
||||
case throw_incompatible_class_change_error_id:
|
||||
case C1StubId::throw_incompatible_class_change_error_id:
|
||||
{
|
||||
__ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
|
||||
}
|
||||
break;
|
||||
|
||||
case slow_subtype_check_id:
|
||||
case C1StubId::slow_subtype_check_id:
|
||||
{ // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
|
||||
const Register sub_klass = R5,
|
||||
super_klass = R4,
|
||||
@ -605,12 +605,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorenter_nofpu_id:
|
||||
case monitorenter_id:
|
||||
case C1StubId::monitorenter_nofpu_id:
|
||||
case C1StubId::monitorenter_id:
|
||||
{
|
||||
__ set_info("monitorenter", dont_gc_arguments);
|
||||
|
||||
int save_fpu_registers = (id == monitorenter_id);
|
||||
int save_fpu_registers = (id == C1StubId::monitorenter_id);
|
||||
// Make a frame and preserve the caller's caller-save registers.
|
||||
OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
|
||||
|
||||
@ -624,15 +624,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case monitorexit_nofpu_id:
|
||||
case monitorexit_id:
|
||||
case C1StubId::monitorexit_nofpu_id:
|
||||
case C1StubId::monitorexit_id:
|
||||
{
|
||||
// note: Really a leaf routine but must setup last java sp
|
||||
// => use call_RT for now (speed can be improved by
|
||||
// doing last java sp setup manually).
|
||||
__ set_info("monitorexit", dont_gc_arguments);
|
||||
|
||||
int save_fpu_registers = (id == monitorexit_id);
|
||||
int save_fpu_registers = (id == C1StubId::monitorexit_id);
|
||||
// Make a frame and preserve the caller's caller-save registers.
|
||||
OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
|
||||
|
||||
@ -646,7 +646,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case deoptimize_id:
|
||||
case C1StubId::deoptimize_id:
|
||||
{
|
||||
__ set_info("deoptimize", dont_gc_arguments);
|
||||
__ std(R0, -8, R1_SP); // Pass trap_request on stack.
|
||||
@ -662,35 +662,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case access_field_patching_id:
|
||||
case C1StubId::access_field_patching_id:
|
||||
{
|
||||
__ set_info("access_field_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_klass_patching_id:
|
||||
case C1StubId::load_klass_patching_id:
|
||||
{
|
||||
__ set_info("load_klass_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_mirror_patching_id:
|
||||
case C1StubId::load_mirror_patching_id:
|
||||
{
|
||||
__ set_info("load_mirror_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case load_appendix_patching_id:
|
||||
case C1StubId::load_appendix_patching_id:
|
||||
{
|
||||
__ set_info("load_appendix_patching", dont_gc_arguments);
|
||||
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
|
||||
}
|
||||
break;
|
||||
|
||||
case dtrace_object_alloc_id:
|
||||
case C1StubId::dtrace_object_alloc_id:
|
||||
{ // O0: object
|
||||
__ unimplemented("stub dtrace_object_alloc_id");
|
||||
__ set_info("dtrace_object_alloc", dont_gc_arguments);
|
||||
@ -710,7 +710,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
break;
|
||||
|
||||
case predicate_failed_trap_id:
|
||||
case C1StubId::predicate_failed_trap_id:
|
||||
{
|
||||
__ set_info("predicate_failed_trap", dont_gc_arguments);
|
||||
OopMap* oop_map = save_live_registers(sasm);
|
||||
@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) {
|
||||
__ block_comment("generate_handle_exception");
|
||||
|
||||
// Save registers, if required.
|
||||
@ -764,7 +764,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;
|
||||
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case C1StubId::forward_exception_id:
|
||||
// We're handling an exception in the context of a compiled frame.
|
||||
// The registers have been saved in the standard places. Perform
|
||||
// an exception lookup in the caller and dispatch to the handler
|
||||
@ -780,12 +780,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
__ ld(Rexception_pc, _abi0(lr), Rexception_pc);
|
||||
__ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
|
||||
break;
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
// At this point all registers MAY be live.
|
||||
oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
|
||||
oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Rexception_pc);
|
||||
break;
|
||||
case handle_exception_from_callee_id:
|
||||
case C1StubId::handle_exception_from_callee_id:
|
||||
// At this point all registers except exception oop and exception pc are dead.
|
||||
oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
|
||||
sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
|
||||
@ -824,13 +824,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
|
||||
// Restore the registers that were saved at the beginning, remove
|
||||
// the frame and jump to the exception handler.
|
||||
switch (id) {
|
||||
case forward_exception_id:
|
||||
case handle_exception_nofpu_id:
|
||||
case handle_exception_id:
|
||||
restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
|
||||
case C1StubId::forward_exception_id:
|
||||
case C1StubId::handle_exception_nofpu_id:
|
||||
case C1StubId::handle_exception_id:
|
||||
restore_live_registers(sasm, noreg, noreg, id != C1StubId::handle_exception_nofpu_id);
|
||||
__ bctr();
|
||||
break;
|
||||
case handle_exception_from_callee_id: {
|
||||
case C1StubId::handle_exception_from_callee_id: {
|
||||
__ pop_frame();
|
||||
__ ld(Rexception_pc, _abi0(lr), R1_SP);
|
||||
__ mtlr(Rexception_pc);
|
||||
|
||||
@ -39,12 +39,12 @@
|
||||
|
||||
void C2_MacroAssembler::fast_lock_lightweight(ConditionRegister flag, Register obj, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
compiler_fast_lock_lightweight_object(flag, obj, tmp1, tmp2, tmp3);
|
||||
compiler_fast_lock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
compiler_fast_unlock_lightweight_object(flag, obj, tmp1, tmp2, tmp3);
|
||||
compiler_fast_unlock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
|
||||
}
|
||||
|
||||
// Intrinsics for CompactStrings
|
||||
|
||||
@ -117,9 +117,9 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
return false;
|
||||
}
|
||||
|
||||
common_abi* sender_abi = (common_abi*) fp;
|
||||
volatile common_abi* sender_abi = (common_abi*) fp; // May get updated concurrently by deoptimization!
|
||||
intptr_t* sender_sp = (intptr_t*) fp;
|
||||
address sender_pc = (address) sender_abi->lr;;
|
||||
address sender_pc = (address) sender_abi->lr;
|
||||
|
||||
if (Continuation::is_return_barrier_entry(sender_pc)) {
|
||||
// If our sender_pc is the return barrier, then our "real" sender is the continuation entry
|
||||
@ -134,9 +134,18 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
return false;
|
||||
}
|
||||
|
||||
intptr_t* unextended_sender_sp = is_interpreted_frame() ? interpreter_frame_sender_sp() : sender_sp;
|
||||
|
||||
// If the sender is a deoptimized nmethod we need to check if the original pc is valid.
|
||||
nmethod* sender_nm = sender_blob->as_nmethod_or_null();
|
||||
if (sender_nm != nullptr && sender_nm->is_deopt_pc(sender_pc)) {
|
||||
address orig_pc = *(address*)((address)unextended_sender_sp + sender_nm->orig_pc_offset());
|
||||
if (!sender_nm->insts_contains_inclusive(orig_pc)) return false;
|
||||
}
|
||||
|
||||
// It should be safe to construct the sender though it might not be valid.
|
||||
|
||||
frame sender(sender_sp, sender_pc, nullptr /* unextended_sp */, nullptr /* fp */, sender_blob);
|
||||
frame sender(sender_sp, sender_pc, unextended_sender_sp, nullptr /* fp */, sender_blob);
|
||||
|
||||
// Do we have a valid fp?
|
||||
address sender_fp = (address) sender.fp();
|
||||
|
||||
@ -41,10 +41,20 @@
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/g1/c1/g1BarrierSetC1.hpp"
|
||||
#endif
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#define __ masm->
|
||||
|
||||
static void generate_marking_inactive_test(MacroAssembler* masm) {
|
||||
int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(R0, active_offset, R16_thread); // tmp1 := *(mark queue active address)
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
|
||||
Register from, Register to, Register count,
|
||||
Register preserve1, Register preserve2) {
|
||||
@ -58,13 +68,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
|
||||
Label filtered;
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ lwz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
|
||||
} else {
|
||||
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
|
||||
}
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
__ save_LR(R0);
|
||||
@ -109,35 +113,48 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
__ restore_LR(R0);
|
||||
}
|
||||
|
||||
static void generate_queue_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
|
||||
const Register value, const Register temp) {
|
||||
assert_different_registers(value, temp);
|
||||
// Can we store a value in the given thread's buffer?
|
||||
// (The index field is typed as size_t.)
|
||||
__ ld(temp, in_bytes(index_offset), R16_thread); // temp := *(index address)
|
||||
__ cmpdi(CCR0, temp, 0); // jump to runtime if index == 0 (full buffer)
|
||||
__ beq(CCR0, runtime);
|
||||
// The buffer is not full, store value into it.
|
||||
__ ld(R0, in_bytes(buffer_offset), R16_thread); // R0 := buffer address
|
||||
__ addi(temp, temp, -wordSize); // temp := next index
|
||||
__ std(temp, in_bytes(index_offset), R16_thread); // *(index address) := next index
|
||||
__ stdx(value, temp, R0); // *(buffer address + next index) := value
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
|
||||
Register obj, RegisterOrConstant ind_or_offs, Register pre_val,
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
assert_different_registers(pre_val, tmp1, tmp2);
|
||||
|
||||
bool not_null = (decorators & IS_NOT_NULL) != 0,
|
||||
preloaded = obj == noreg;
|
||||
Register nv_save = noreg;
|
||||
|
||||
if (preloaded) {
|
||||
// Determine necessary runtime invocation preservation measures
|
||||
const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
|
||||
const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
|
||||
const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
|
||||
int nbytes_save = 0;
|
||||
|
||||
if (pre_val->is_volatile() && preloaded && !preserve_gp_registers) {
|
||||
// We are not loading the previous value so make
|
||||
// sure that we don't trash the value in pre_val
|
||||
// with the code below.
|
||||
assert_different_registers(pre_val, tmp1, tmp2);
|
||||
if (pre_val->is_volatile()) {
|
||||
nv_save = !tmp1->is_volatile() ? tmp1 : tmp2;
|
||||
assert(!nv_save->is_volatile(), "need one nv temp register if pre_val lives in volatile register");
|
||||
}
|
||||
nv_save = !tmp1->is_volatile() ? tmp1 : tmp2;
|
||||
assert(!nv_save->is_volatile(), "need one nv temp register if pre_val lives in volatile register");
|
||||
}
|
||||
|
||||
Label runtime, filtered;
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ lwz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
|
||||
} else {
|
||||
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
|
||||
}
|
||||
__ cmpdi(CCR0, tmp1, 0);
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
@ -175,28 +192,12 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
const Register Rbuffer = tmp1, Rindex = tmp2;
|
||||
|
||||
__ ld(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, Rindex, 0);
|
||||
__ beq(CCR0, runtime); // If index == 0, goto runtime.
|
||||
__ ld(Rbuffer, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);
|
||||
|
||||
__ addi(Rindex, Rindex, -wordSize); // Decrement index.
|
||||
__ std(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
|
||||
// Record the previous value.
|
||||
__ stdx(pre_val, Rbuffer, Rindex);
|
||||
generate_queue_insertion(masm, G1ThreadLocalData::satb_mark_queue_index_offset(), G1ThreadLocalData::satb_mark_queue_buffer_offset(),
|
||||
runtime, pre_val, tmp1);
|
||||
__ b(filtered);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
// Determine necessary runtime invocation preservation measures
|
||||
const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
|
||||
const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
|
||||
const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
|
||||
int nbytes_save = 0;
|
||||
|
||||
// May need to preserve LR. Also needed if current frame is not compatible with C calling convention.
|
||||
if (needs_frame) {
|
||||
if (preserve_gp_registers) {
|
||||
@ -210,11 +211,11 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
__ push_frame_reg_args(nbytes_save, tmp2);
|
||||
}
|
||||
|
||||
if (pre_val->is_volatile() && preloaded && !preserve_gp_registers) {
|
||||
if (nv_save != noreg) {
|
||||
__ mr(nv_save, pre_val); // Save pre_val across C call if it was preloaded.
|
||||
}
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
|
||||
if (pre_val->is_volatile() && preloaded && !preserve_gp_registers) {
|
||||
if (nv_save != noreg) {
|
||||
__ mr(pre_val, nv_save); // restore
|
||||
}
|
||||
|
||||
@ -230,6 +231,26 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
__ bind(filtered);
|
||||
}
|
||||
|
||||
static void generate_region_crossing_test(MacroAssembler* masm, const Register store_addr, const Register new_val) {
|
||||
__ xorr(R0, store_addr, new_val); // tmp1 := store address ^ new value
|
||||
__ srdi_(R0, R0, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
|
||||
}
|
||||
|
||||
static Address generate_card_young_test(MacroAssembler* masm, const Register store_addr, const Register tmp1, const Register tmp2) {
|
||||
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
__ load_const_optimized(tmp1, (address)(ct->card_table()->byte_map_base()), tmp2);
|
||||
__ srdi(tmp2, store_addr, CardTable::card_shift()); // tmp1 := card address relative to card table base
|
||||
__ lbzx(R0, tmp1, tmp2); // tmp1 := card address
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::g1_young_card_val());
|
||||
return Address(tmp1, tmp2); // return card address
|
||||
}
|
||||
|
||||
static void generate_card_dirty_test(MacroAssembler* masm, Address card_addr) {
|
||||
__ membar(Assembler::StoreLoad); // Must reload after StoreLoad membar due to concurrent refinement
|
||||
__ lbzx(R0, card_addr.base(), card_addr.index()); // tmp2 := card
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::dirty_card_val()); // tmp2 := card == dirty_card_val?
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators,
|
||||
Register store_addr, Register new_val,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
@ -241,9 +262,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
|
||||
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
|
||||
// Does store cross heap regions?
|
||||
__ xorr(tmp1, store_addr, new_val);
|
||||
__ srdi_(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
|
||||
generate_region_crossing_test(masm, store_addr, new_val);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Crosses regions, storing null?
|
||||
@ -257,43 +276,22 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
__ beq(CCR0, filtered);
|
||||
}
|
||||
|
||||
// Storing region crossing non-null, is card already dirty?
|
||||
const Register Rcard_addr = tmp1;
|
||||
Register Rbase = tmp2;
|
||||
__ load_const_optimized(Rbase, (address)(ct->card_table()->byte_map_base()), /*temp*/ tmp3);
|
||||
|
||||
__ srdi(Rcard_addr, store_addr, CardTable::card_shift());
|
||||
|
||||
// Get the address of the card.
|
||||
__ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr);
|
||||
__ cmpwi(CCR0, tmp3, (int)G1CardTable::g1_young_card_val());
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
__ membar(Assembler::StoreLoad);
|
||||
__ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr); // Reload after membar.
|
||||
__ cmpwi(CCR0, tmp3 /* card value */, (int)G1CardTable::dirty_card_val());
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Storing a region crossing, non-null oop, card is clean.
|
||||
// Dirty card and log.
|
||||
__ li(tmp3, (int)G1CardTable::dirty_card_val());
|
||||
//release(); // G1: oops are allowed to get visible after dirty marking.
|
||||
__ stbx(tmp3, Rbase, Rcard_addr);
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
|
||||
__ add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
|
||||
Rbase = noreg; // end of lifetime
|
||||
Register Rcard_addr = tmp3;
|
||||
__ add(Rcard_addr, card_addr.base(), card_addr.index()); // This is the address which needs to get enqueued.
|
||||
|
||||
const Register Rqueue_index = tmp2,
|
||||
Rqueue_buf = tmp3;
|
||||
__ ld(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, Rqueue_index, 0);
|
||||
__ beq(CCR0, runtime); // index == 0 then jump to runtime
|
||||
__ ld(Rqueue_buf, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()), R16_thread);
|
||||
|
||||
__ addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
|
||||
__ std(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);
|
||||
|
||||
__ stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
|
||||
generate_queue_insertion(masm,
|
||||
G1ThreadLocalData::dirty_card_queue_index_offset(),
|
||||
G1ThreadLocalData::dirty_card_queue_buffer_offset(),
|
||||
runtime, Rcard_addr, tmp1);
|
||||
__ b(filtered);
|
||||
|
||||
__ bind(runtime);
|
||||
@ -392,6 +390,142 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
|
||||
SaveLiveRegisters save_registers(masm, stub);
|
||||
__ call_VM_leaf(runtime_path, arg, R16_thread);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* stub) {
|
||||
assert_different_registers(obj, tmp1, tmp2, R0);
|
||||
assert_different_registers(pre_val, tmp1, R0);
|
||||
assert(!UseCompressedOops || tmp2 != noreg, "tmp2 needed with CompressedOops");
|
||||
|
||||
stub->initialize_registers(obj, pre_val, R16_thread, tmp1, tmp2);
|
||||
|
||||
generate_marking_inactive_test(masm);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Register obj = stub->obj();
|
||||
Register pre_val = stub->pre_val();
|
||||
Register tmp1 = stub->tmp1();
|
||||
|
||||
__ bind(*stub->entry());
|
||||
|
||||
if (obj != noreg) {
|
||||
// Note: C2 currently doesn't use implicit null checks with barriers.
|
||||
// Otherwise, obj could be null and the following instruction would raise a SIGSEGV.
|
||||
if (UseCompressedOops) {
|
||||
__ lwz(pre_val, 0, obj);
|
||||
} else {
|
||||
__ ld(pre_val, 0, obj);
|
||||
}
|
||||
}
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
Register pre_val_decoded = pre_val;
|
||||
if (UseCompressedOops) {
|
||||
pre_val_decoded = __ decode_heap_oop_not_null(stub->tmp2(), pre_val);
|
||||
}
|
||||
|
||||
generate_queue_insertion(masm,
|
||||
G1ThreadLocalData::satb_mark_queue_index_offset(),
|
||||
G1ThreadLocalData::satb_mark_queue_buffer_offset(),
|
||||
runtime, pre_val_decoded, tmp1);
|
||||
__ b(*stub->continuation());
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, pre_val_decoded, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PostBarrierStubC2* stub,
|
||||
bool decode_new_val) {
|
||||
assert_different_registers(store_addr, new_val, tmp1, R0);
|
||||
assert_different_registers(store_addr, tmp1, tmp2, R0);
|
||||
|
||||
stub->initialize_registers(R16_thread, tmp1, tmp2);
|
||||
|
||||
bool null_check_required = (stub->barrier_data() & G1C2BarrierPostNotNull) == 0;
|
||||
Register new_val_decoded = new_val;
|
||||
|
||||
if (decode_new_val) {
|
||||
assert(UseCompressedOops, "or should not be here");
|
||||
if (null_check_required && CompressedOops::base() != nullptr) {
|
||||
// We prefer doing the null check after the region crossing check.
|
||||
// Only compressed oop modes with base != null require a null check here.
|
||||
__ cmpwi(CCR0, new_val, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
null_check_required = false;
|
||||
}
|
||||
new_val_decoded = __ decode_heap_oop_not_null(tmp2, new_val);
|
||||
}
|
||||
|
||||
generate_region_crossing_test(masm, store_addr, new_val_decoded);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
|
||||
// crosses regions, storing null?
|
||||
if (null_check_required) {
|
||||
__ cmpdi(CCR0, new_val_decoded, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
}
|
||||
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
assert(card_addr.base() == tmp1 && card_addr.index() == tmp2, "needed by post barrier stub");
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
Label runtime;
|
||||
Address card_addr(stub->tmp1(), stub->tmp2()); // See above.
|
||||
|
||||
__ bind(*stub->entry());
|
||||
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
|
||||
Register Rcard_addr = stub->tmp1();
|
||||
__ add(Rcard_addr, card_addr.base(), card_addr.index()); // This is the address which needs to get enqueued.
|
||||
|
||||
generate_queue_insertion(masm,
|
||||
G1ThreadLocalData::dirty_card_queue_index_offset(),
|
||||
G1ThreadLocalData::dirty_card_queue_buffer_offset(),
|
||||
runtime, Rcard_addr, stub->tmp2());
|
||||
__ b(*stub->continuation());
|
||||
|
||||
__ bind(runtime);
|
||||
generate_c2_barrier_runtime_call(masm, stub, Rcard_addr, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry));
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
|
||||
#undef __
|
||||
@ -470,13 +604,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
__ std(tmp2, -24, R1_SP);
|
||||
|
||||
// Is marking still active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ lwz(tmp, satb_q_active_byte_offset, R16_thread);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(tmp, satb_q_active_byte_offset, R16_thread);
|
||||
}
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
generate_marking_inactive_test(sasm);
|
||||
__ beq(CCR0, marking_not_active);
|
||||
|
||||
__ bind(restart);
|
||||
|
||||
@ -30,10 +30,16 @@
|
||||
#include "gc/shared/modRefBarrierSetAssembler.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
#ifdef COMPILER2
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#endif
|
||||
|
||||
class LIR_Assembler;
|
||||
class StubAssembler;
|
||||
class G1PreBarrierStub;
|
||||
class G1PostBarrierStub;
|
||||
class G1PreBarrierStubC2;
|
||||
class G1PostBarrierStubC2;
|
||||
|
||||
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
|
||||
protected:
|
||||
@ -59,6 +65,25 @@ protected:
|
||||
MacroAssembler::PreservationLevel preservation_level);
|
||||
|
||||
public:
|
||||
#ifdef COMPILER2
|
||||
void g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PreBarrierStubC2* c2_stub);
|
||||
void generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
G1PreBarrierStubC2* stub) const;
|
||||
void g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
G1PostBarrierStubC2* c2_stub,
|
||||
bool decode_new_val);
|
||||
void generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
G1PostBarrierStubC2* stub) const;
|
||||
#endif
|
||||
#ifdef COMPILER1
|
||||
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
|
||||
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
|
||||
|
||||
684
src/hotspot/cpu/ppc/gc/g1/g1_ppc.ad
Normal file
684
src/hotspot/cpu/ppc/gc/g1/g1_ppc.ad
Normal file
@ -0,0 +1,684 @@
|
||||
//
|
||||
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2024 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/g1/c2/g1BarrierSetC2.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "gc/g1/g1BarrierSetAssembler_ppc.hpp"
|
||||
#include "gc/g1/g1BarrierSetRuntime.hpp"
|
||||
|
||||
static void pre_write_barrier(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2 = noreg, // only needed with CompressedOops when pre_val needs to be preserved
|
||||
RegSet preserve = RegSet(),
|
||||
RegSet no_preserve = RegSet()) {
|
||||
if (!G1PreBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
|
||||
for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->preserve(*reg);
|
||||
}
|
||||
for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
|
||||
stub->dont_preserve(*reg);
|
||||
}
|
||||
g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, tmp1, (tmp2 != noreg) ? tmp2 : pre_val, stub);
|
||||
}
|
||||
|
||||
static void post_write_barrier(MacroAssembler* masm,
|
||||
const MachNode* node,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
bool decode_new_val = false) {
|
||||
if (!G1PostBarrierStubC2::needs_barrier(node)) {
|
||||
return;
|
||||
}
|
||||
Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
|
||||
G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
|
||||
G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
|
||||
g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, tmp1, tmp2, stub, decode_new_val);
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
instruct g1StoreP(indirect mem, iRegPsrc src, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "std $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
pre_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
noreg,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ std($src$$Register, 0, $mem$$Register);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1StoreN(indirect mem, iRegNsrc src, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreN mem src));
|
||||
effect(TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "stw $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
pre_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
noreg,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
__ stw($src$$Register, 0, $mem$$Register);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
true /* decode_new_val */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1EncodePAndStoreN(indirect mem, iRegPsrc src, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreN mem (EncodeP src)));
|
||||
effect(TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "encode_heap_oop $src\n\t"
|
||||
"stw $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
pre_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
noreg,
|
||||
RegSet::of($mem$$Register, $src$$Register) /* preserve */);
|
||||
Register encoded_oop = noreg;
|
||||
if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
|
||||
encoded_oop = __ encode_heap_oop($tmp2$$Register, $src$$Register);
|
||||
} else {
|
||||
encoded_oop = __ encode_heap_oop_not_null($tmp2$$Register, $src$$Register);
|
||||
}
|
||||
__ stw(encoded_oop, 0, $mem$$Register);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$src$$Register /* new_val */,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndExchangeP(iRegPdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndExchangeNode*)n)->order() != MemNode::acquire && ((CompareAndExchangeNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "cmpxchgd $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndExchangeP_acq(iRegPdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndExchangeNode*)n)->order() == MemNode::acquire || ((CompareAndExchangeNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "cmpxchgd acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
__ bind(no_update);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndExchangeN(iRegNdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndExchangeNode*)n)->order() != MemNode::acquire && ((CompareAndExchangeNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "cmpxchgw $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
true /* decode_new_val */);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndExchangeN_acq(iRegNdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndExchangeNode*)n)->order() == MemNode::acquire || ((CompareAndExchangeNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "cmpxchgw acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
true /* decode_new_val */);
|
||||
__ bind(no_update);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "CMPXCHGD $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "CMPXCHGW $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
true /* decode_new_val */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "CMPXCHGW acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
true /* decode_new_val */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct weakG1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "weak CMPXCHGD $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct weakG1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "weak CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */);
|
||||
__ li($res$$Register, 1);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
__ bind(no_update); // weak version requires no memory barrier on failure
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct weakG1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));
|
||||
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "weak CMPXCHGW $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
true /* decode_new_val */);
|
||||
__ li($res$$Register, 1);
|
||||
__ bind(no_update);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct weakG1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc newval, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0 &&
|
||||
(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
|
||||
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
format %{ "weak CMPXCHGW acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg,
|
||||
$oldval$$Register /* pre_val */,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */,
|
||||
RegSet::of($res$$Register) /* no_preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp$$Register,
|
||||
$res$$Register /* temp */,
|
||||
true /* decode_new_val */);
|
||||
__ li($res$$Register, 1);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
// isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
|
||||
__ sync();
|
||||
}
|
||||
__ bind(no_update); // weak version requires no memory barrier on failure
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1GetAndSetP(iRegPdst res, indirect mem, iRegPsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set res (GetAndSetP mem newval));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "GetAndSetP $newval, $mem" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($mem$$Register, $newval$$Register);
|
||||
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg /* obj */,
|
||||
$res$$Register /* res */,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1GetAndSetN(iRegNdst res, indirect mem, iRegNsrc newval, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_LoadStore()->barrier_data() != 0);
|
||||
match(Set res (GetAndSetN mem newval));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
format %{ "GetAndSetN $newval, $mem" %}
|
||||
ins_encode %{
|
||||
assert_different_registers($mem$$Register, $newval$$Register);
|
||||
__ getandsetw($res$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
// Can be done after cmpxchg because there's no safepoint here.
|
||||
pre_write_barrier(masm, this,
|
||||
noreg /* obj */,
|
||||
$res$$Register /* res */,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
RegSet::of($mem$$Register, $newval$$Register) /* preserve */);
|
||||
post_write_barrier(masm, this,
|
||||
$mem$$Register,
|
||||
$newval$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register,
|
||||
true /* decode_new_val */);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1LoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Load()->is_unordered() && n->as_Load()->barrier_data() != 0);
|
||||
// This instruction does not need an acquiring counterpart because it is only
|
||||
// used for reference loading (Reference::get()).
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "ld $dst, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
pre_write_barrier(masm, this,
|
||||
noreg /* obj */,
|
||||
$dst$$Register /* pre_val */,
|
||||
$tmp$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct g1LoadN(iRegNdst dst, memoryAlg4 mem, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseG1GC && n->as_Load()->is_unordered() && n->as_Load()->barrier_data() != 0);
|
||||
// This instruction does not need an acquiring counterpart because it is only
|
||||
// used for reference loading (Reference::get()).
|
||||
match(Set dst (LoadN mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "lwz $dst, $mem\t# ptr" %}
|
||||
ins_encode %{
|
||||
__ lwz($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
pre_write_barrier(masm, this,
|
||||
noreg /* obj */,
|
||||
$dst$$Register,
|
||||
$tmp1$$Register,
|
||||
$tmp2$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
@ -144,9 +144,9 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
|
||||
// Invoke runtime.
|
||||
address jrt_address = nullptr;
|
||||
if (UseCompressedOops) {
|
||||
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry);
|
||||
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
|
||||
} else {
|
||||
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry);
|
||||
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop);
|
||||
}
|
||||
assert(jrt_address != nullptr, "jrt routine cannot be found");
|
||||
|
||||
@ -302,7 +302,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
}
|
||||
|
||||
// Invoke runtime.
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, R16_thread);
|
||||
|
||||
// Restore to-be-preserved registers.
|
||||
if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
|
||||
@ -906,7 +906,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ push_frame_reg_args(nbytes_save, R11_tmp1);
|
||||
|
||||
// Invoke runtime.
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), R0_pre_val, R16_thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), R0_pre_val, R16_thread);
|
||||
|
||||
// Restore to-be-preserved registers.
|
||||
__ pop_frame();
|
||||
|
||||
@ -90,7 +90,7 @@ static size_t probe_valid_max_address_bit() {
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -135,15 +135,7 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
|
||||
// Call the Interpreter::remove_activation_preserving_args_entry()
|
||||
// func to get the address of the same-named entrypoint in the
|
||||
// generated interpreter code.
|
||||
#if defined(ABI_ELFv2)
|
||||
call_c(CAST_FROM_FN_PTR(address,
|
||||
Interpreter::remove_activation_preserving_args_entry),
|
||||
relocInfo::none);
|
||||
#else
|
||||
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
|
||||
Interpreter::remove_activation_preserving_args_entry),
|
||||
relocInfo::none);
|
||||
#endif
|
||||
call_c(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
|
||||
|
||||
// Jump to Interpreter::_remove_activation_preserving_args_entry.
|
||||
mtctr(R3_RET);
|
||||
@ -970,13 +962,13 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, object);
|
||||
lwz(tmp, in_bytes(Klass::access_flags_offset()), tmp);
|
||||
testbitdi(CCR0, R0, tmp, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
|
||||
lbz(tmp, in_bytes(Klass::misc_flags_offset()), tmp);
|
||||
testbitdi(CCR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_case);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(object, header, tmp, slow_case);
|
||||
lightweight_lock(monitor, object, header, tmp, slow_case);
|
||||
b(count_locking);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Load markWord from object into header.
|
||||
|
||||
@ -1293,11 +1293,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
|
||||
|
||||
// ARG1 must hold thread address.
|
||||
mr(R3_ARG1, R16_thread);
|
||||
#if defined(ABI_ELFv2)
|
||||
address return_pc = call_c(entry_point, relocInfo::none);
|
||||
#else
|
||||
address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
|
||||
#endif
|
||||
|
||||
reset_last_Java_frame();
|
||||
|
||||
@ -1318,11 +1314,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
|
||||
|
||||
void MacroAssembler::call_VM_leaf_base(address entry_point) {
|
||||
BLOCK_COMMENT("call_VM_leaf {");
|
||||
#if defined(ABI_ELFv2)
|
||||
call_c(entry_point, relocInfo::none);
|
||||
#else
|
||||
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
|
||||
#endif
|
||||
call_c(entry_point);
|
||||
BLOCK_COMMENT("} call_VM_leaf");
|
||||
}
|
||||
|
||||
@ -2418,7 +2410,7 @@ void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
|
||||
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
|
||||
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
|
||||
|
||||
Label L_fallthrough;
|
||||
Label L_check_thread, L_fallthrough;
|
||||
if (L_fast_path == nullptr) {
|
||||
L_fast_path = &L_fallthrough;
|
||||
} else if (L_slow_path == nullptr) {
|
||||
@ -2427,10 +2419,14 @@ void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fa
|
||||
|
||||
// Fast path check: class is fully initialized
|
||||
lbz(R0, in_bytes(InstanceKlass::init_state_offset()), klass);
|
||||
// acquire by cmp-branch-isync if fully_initialized
|
||||
cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
|
||||
beq(CCR0, *L_fast_path);
|
||||
bne(CCR0, L_check_thread);
|
||||
isync();
|
||||
b(*L_fast_path);
|
||||
|
||||
// Fast path check: current thread is initializer thread
|
||||
bind(L_check_thread);
|
||||
ld(R0, in_bytes(InstanceKlass::init_thread_offset()), klass);
|
||||
cmpd(CCR0, thread, R0);
|
||||
if (L_slow_path == &L_fallthrough) {
|
||||
@ -2561,8 +2557,8 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(temp, oop);
|
||||
lwz(temp, in_bytes(Klass::access_flags_offset()), temp);
|
||||
testbitdi(flag, R0, temp, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
|
||||
lbz(temp, in_bytes(Klass::misc_flags_offset()), temp);
|
||||
testbitdi(flag, R0, temp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(flag, failure);
|
||||
}
|
||||
|
||||
@ -2723,13 +2719,34 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
|
||||
b(success);
|
||||
|
||||
bind(notRecursive);
|
||||
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
release();
|
||||
li(temp, 0);
|
||||
std(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
|
||||
// We need a full fence after clearing owner to avoid stranding.
|
||||
// StoreLoad achieves this.
|
||||
membar(StoreLoad);
|
||||
|
||||
// Check if the entry lists are empty.
|
||||
ld(temp, in_bytes(ObjectMonitor::EntryList_offset()), current_header);
|
||||
ld(displaced_header, in_bytes(ObjectMonitor::cxq_offset()), current_header);
|
||||
orr(temp, temp, displaced_header); // Will be 0 if both are 0.
|
||||
cmpdi(flag, temp, 0);
|
||||
bne(flag, failure);
|
||||
release();
|
||||
std(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
|
||||
beq(flag, success); // If so we are done.
|
||||
|
||||
// Check if there is a successor.
|
||||
ld(temp, in_bytes(ObjectMonitor::succ_offset()), current_header);
|
||||
cmpdi(flag, temp, 0);
|
||||
bne(flag, success); // If so we are done.
|
||||
|
||||
// Save the monitor pointer in the current thread, so we can try
|
||||
// to reacquire the lock in SharedRuntime::monitor_exit_helper().
|
||||
std(current_header, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread);
|
||||
|
||||
crxor(flag, Assembler::equal, flag, Assembler::equal); // Set flag = NE => slow path
|
||||
b(failure);
|
||||
|
||||
// flag == EQ indicates success, decrement held monitor count
|
||||
// flag == NE indicates failure
|
||||
@ -2738,9 +2755,9 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
|
||||
bind(failure);
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register tmp1,
|
||||
Register tmp2, Register tmp3) {
|
||||
assert_different_registers(obj, tmp1, tmp2, tmp3);
|
||||
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
assert_different_registers(obj, box, tmp1, tmp2, tmp3);
|
||||
assert(flag == CCR0, "bad condition register");
|
||||
|
||||
// Handle inflated monitor.
|
||||
@ -2750,11 +2767,17 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
|
||||
// Finish fast lock unsuccessfully. MUST branch to with flag == EQ
|
||||
Label slow_path;
|
||||
|
||||
if (UseObjectMonitorTable) {
|
||||
// Clear cache in case fast locking succeeds.
|
||||
li(tmp1, 0);
|
||||
std(tmp1, in_bytes(BasicObjectLock::lock_offset()) + BasicLock::object_monitor_cache_offset_in_bytes(), box);
|
||||
}
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp1, obj);
|
||||
lwz(tmp1, in_bytes(Klass::access_flags_offset()), tmp1);
|
||||
testbitdi(flag, R0, tmp1, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
|
||||
bne(flag, slow_path);
|
||||
lbz(tmp1, in_bytes(Klass::misc_flags_offset()), tmp1);
|
||||
testbitdi(CCR0, R0, tmp1, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_path);
|
||||
}
|
||||
|
||||
const Register mark = tmp1;
|
||||
@ -2769,8 +2792,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
|
||||
|
||||
// Check if lock-stack is full.
|
||||
lwz(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
|
||||
cmplwi(flag, top, LockStack::end_offset() - 1);
|
||||
bgt(flag, slow_path);
|
||||
cmplwi(CCR0, top, LockStack::end_offset() - 1);
|
||||
bgt(CCR0, slow_path);
|
||||
|
||||
// The underflow check is elided. The recursive check will always fail
|
||||
// when the lock stack is empty because of the _bad_oop_sentinel field.
|
||||
@ -2778,19 +2801,19 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
|
||||
// Check if recursive.
|
||||
subi(t, top, oopSize);
|
||||
ldx(t, R16_thread, t);
|
||||
cmpd(flag, obj, t);
|
||||
beq(flag, push);
|
||||
cmpd(CCR0, obj, t);
|
||||
beq(CCR0, push);
|
||||
|
||||
// Check for monitor (0b10) or locked (0b00).
|
||||
ld(mark, oopDesc::mark_offset_in_bytes(), obj);
|
||||
andi_(t, mark, markWord::lock_mask_in_place);
|
||||
cmpldi(flag, t, markWord::unlocked_value);
|
||||
bgt(flag, inflated);
|
||||
bne(flag, slow_path);
|
||||
cmpldi(CCR0, t, markWord::unlocked_value);
|
||||
bgt(CCR0, inflated);
|
||||
bne(CCR0, slow_path);
|
||||
|
||||
// Not inflated.
|
||||
|
||||
// Try to lock. Transition lock bits 0b00 => 0b01
|
||||
// Try to lock. Transition lock bits 0b01 => 0b00
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
|
||||
atomically_flip_locked_state(/* is_unlock */ false, obj, mark, slow_path, MacroAssembler::MemBarAcq);
|
||||
|
||||
@ -2805,38 +2828,84 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
|
||||
{ // Handle inflated monitor.
|
||||
bind(inflated);
|
||||
|
||||
// mark contains the tagged ObjectMonitor*.
|
||||
const uintptr_t monitor_tag = markWord::monitor_value;
|
||||
const Register monitor = mark;
|
||||
const Register owner_addr = tmp2;
|
||||
Label monitor_locked;
|
||||
|
||||
if (!UseObjectMonitorTable) {
|
||||
// mark contains the tagged ObjectMonitor*.
|
||||
const Register tagged_monitor = mark;
|
||||
const uintptr_t monitor_tag = markWord::monitor_value;
|
||||
const Register owner_addr = tmp2;
|
||||
// Compute owner address.
|
||||
addi(owner_addr, mark, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
|
||||
} else {
|
||||
Label monitor_found;
|
||||
Register cache_addr = tmp2;
|
||||
|
||||
// Load cache address
|
||||
addi(cache_addr, R16_thread, in_bytes(JavaThread::om_cache_oops_offset()));
|
||||
|
||||
const int num_unrolled = 2;
|
||||
for (int i = 0; i < num_unrolled; i++) {
|
||||
ld(tmp3, 0, cache_addr);
|
||||
cmpd(CCR0, tmp3, obj);
|
||||
beq(CCR0, monitor_found);
|
||||
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
|
||||
}
|
||||
|
||||
Label loop;
|
||||
|
||||
// Search for obj in cache.
|
||||
bind(loop);
|
||||
|
||||
// Check for match.
|
||||
ld(tmp3, 0, cache_addr);
|
||||
cmpd(CCR0, tmp3, obj);
|
||||
beq(CCR0, monitor_found);
|
||||
|
||||
// Search until null encountered, guaranteed _null_sentinel at end.
|
||||
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
|
||||
cmpdi(CCR1, tmp3, 0);
|
||||
bne(CCR1, loop);
|
||||
// Cache Miss, CCR0.NE set from cmp above
|
||||
b(slow_path);
|
||||
|
||||
bind(monitor_found);
|
||||
ld(monitor, in_bytes(OMCache::oop_to_monitor_difference()), cache_addr);
|
||||
|
||||
// Compute owner address.
|
||||
addi(owner_addr, tagged_monitor, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
|
||||
addi(owner_addr, monitor, in_bytes(ObjectMonitor::owner_offset()));
|
||||
}
|
||||
|
||||
// CAS owner (null => current thread).
|
||||
cmpxchgd(/*flag=*/flag,
|
||||
/*current_value=*/t,
|
||||
/*compare_value=*/(intptr_t)0,
|
||||
/*exchange_value=*/R16_thread,
|
||||
/*where=*/owner_addr,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock());
|
||||
beq(flag, locked);
|
||||
// CAS owner (null => current thread).
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
/*current_value=*/t,
|
||||
/*compare_value=*/(intptr_t)0,
|
||||
/*exchange_value=*/R16_thread,
|
||||
/*where=*/owner_addr,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock());
|
||||
beq(CCR0, monitor_locked);
|
||||
|
||||
// Check if recursive.
|
||||
cmpd(flag, t, R16_thread);
|
||||
bne(flag, slow_path);
|
||||
// Check if recursive.
|
||||
cmpd(CCR0, t, R16_thread);
|
||||
bne(CCR0, slow_path);
|
||||
|
||||
// Recursive.
|
||||
// Recursive.
|
||||
if (!UseObjectMonitorTable) {
|
||||
assert_different_registers(tmp1, owner_addr);
|
||||
ld(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
|
||||
addi(tmp1, tmp1, 1);
|
||||
std(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
|
||||
} else {
|
||||
// OMCache lookup not supported yet. Take the slowpath.
|
||||
// Set flag to NE
|
||||
crxor(flag, Assembler::equal, flag, Assembler::equal);
|
||||
b(slow_path);
|
||||
assert_different_registers(tmp2, monitor);
|
||||
ld(tmp2, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
addi(tmp2, tmp2, 1);
|
||||
std(tmp2, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
}
|
||||
|
||||
bind(monitor_locked);
|
||||
if (UseObjectMonitorTable) {
|
||||
std(monitor, BasicLock::object_monitor_cache_offset_in_bytes(), box);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2846,21 +2915,21 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
|
||||
#ifdef ASSERT
|
||||
// Check that locked label is reached with flag == EQ.
|
||||
Label flag_correct;
|
||||
beq(flag, flag_correct);
|
||||
beq(CCR0, flag_correct);
|
||||
stop("Fast Lock Flag != EQ");
|
||||
#endif
|
||||
bind(slow_path);
|
||||
#ifdef ASSERT
|
||||
// Check that slow_path label is reached with flag == NE.
|
||||
bne(flag, flag_correct);
|
||||
bne(CCR0, flag_correct);
|
||||
stop("Fast Lock Flag != NE");
|
||||
bind(flag_correct);
|
||||
#endif
|
||||
// C2 uses the value of flag (NE vs EQ) to determine the continuation.
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register obj, Register tmp1,
|
||||
Register tmp2, Register tmp3) {
|
||||
void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register obj, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
assert_different_registers(obj, tmp1, tmp2, tmp3);
|
||||
assert(flag == CCR0, "bad condition register");
|
||||
|
||||
@ -2882,9 +2951,9 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
|
||||
lwz(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
|
||||
subi(top, top, oopSize);
|
||||
ldx(t, R16_thread, top);
|
||||
cmpd(flag, obj, t);
|
||||
cmpd(CCR0, obj, t);
|
||||
// Top of lock stack was not obj. Must be monitor.
|
||||
bne(flag, inflated_load_monitor);
|
||||
bne(CCR0, inflated_load_monitor);
|
||||
|
||||
// Pop lock-stack.
|
||||
DEBUG_ONLY(li(t, 0);)
|
||||
@ -2897,8 +2966,8 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
|
||||
// Check if recursive.
|
||||
subi(t, top, oopSize);
|
||||
ldx(t, R16_thread, t);
|
||||
cmpd(flag, obj, t);
|
||||
beq(flag, unlocked);
|
||||
cmpd(CCR0, obj, t);
|
||||
beq(CCR0, unlocked);
|
||||
|
||||
// Not recursive.
|
||||
|
||||
@ -2949,62 +3018,74 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
|
||||
cmplwi(CCR0, top, in_bytes(JavaThread::lock_stack_base_offset()));
|
||||
blt(CCR0, check_done);
|
||||
ldx(t, R16_thread, top);
|
||||
cmpd(flag, obj, t);
|
||||
bne(flag, inflated);
|
||||
cmpd(CCR0, obj, t);
|
||||
bne(CCR0, inflated);
|
||||
stop("Fast Unlock lock on stack");
|
||||
bind(check_done);
|
||||
#endif
|
||||
|
||||
if (!UseObjectMonitorTable) {
|
||||
// mark contains the tagged ObjectMonitor*.
|
||||
const Register monitor = mark;
|
||||
const uintptr_t monitor_tag = markWord::monitor_value;
|
||||
// mark contains the tagged ObjectMonitor*.
|
||||
const Register monitor = mark;
|
||||
const uintptr_t monitor_tag = markWord::monitor_value;
|
||||
|
||||
if (!UseObjectMonitorTable) {
|
||||
// Untag the monitor.
|
||||
subi(monitor, mark, monitor_tag);
|
||||
|
||||
const Register recursions = tmp2;
|
||||
Label not_recursive;
|
||||
|
||||
// Check if recursive.
|
||||
ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
addic_(recursions, recursions, -1);
|
||||
blt(CCR0, not_recursive);
|
||||
|
||||
// Recursive unlock.
|
||||
std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
crorc(CCR0, Assembler::equal, CCR0, Assembler::equal);
|
||||
b(unlocked);
|
||||
|
||||
bind(not_recursive);
|
||||
|
||||
Label release_;
|
||||
const Register t2 = tmp2;
|
||||
|
||||
// Check if the entry lists are empty.
|
||||
ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor);
|
||||
ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor);
|
||||
orr(t, t, t2);
|
||||
cmpdi(flag, t, 0);
|
||||
beq(flag, release_);
|
||||
|
||||
// The owner may be anonymous and we removed the last obj entry in
|
||||
// the lock-stack. This loses the information about the owner.
|
||||
// Write the thread to the owner field so the runtime knows the owner.
|
||||
std(R16_thread, in_bytes(ObjectMonitor::owner_offset()), monitor);
|
||||
b(slow_path);
|
||||
|
||||
bind(release_);
|
||||
// Set owner to null.
|
||||
release();
|
||||
// t contains 0
|
||||
std(t, in_bytes(ObjectMonitor::owner_offset()), monitor);
|
||||
} else {
|
||||
// OMCache lookup not supported yet. Take the slowpath.
|
||||
// Set flag to NE
|
||||
crxor(flag, Assembler::equal, flag, Assembler::equal);
|
||||
b(slow_path);
|
||||
ld(monitor, BasicLock::object_monitor_cache_offset_in_bytes(), box);
|
||||
// null check with Flags == NE, no valid pointer below alignof(ObjectMonitor*)
|
||||
cmpldi(CCR0, monitor, checked_cast<uint8_t>(alignof(ObjectMonitor*)));
|
||||
blt(CCR0, slow_path);
|
||||
}
|
||||
|
||||
const Register recursions = tmp2;
|
||||
Label not_recursive;
|
||||
|
||||
// Check if recursive.
|
||||
ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
addic_(recursions, recursions, -1);
|
||||
blt(CCR0, not_recursive);
|
||||
|
||||
// Recursive unlock.
|
||||
std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
|
||||
crorc(CCR0, Assembler::equal, CCR0, Assembler::equal);
|
||||
b(unlocked);
|
||||
|
||||
bind(not_recursive);
|
||||
|
||||
Label set_eq_unlocked;
|
||||
const Register t2 = tmp2;
|
||||
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
release();
|
||||
li(t, 0);
|
||||
std(t, in_bytes(ObjectMonitor::owner_offset()), monitor);
|
||||
// We need a full fence after clearing owner to avoid stranding.
|
||||
// StoreLoad achieves this.
|
||||
membar(StoreLoad);
|
||||
|
||||
// Check if the entry lists are empty.
|
||||
ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor);
|
||||
ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor);
|
||||
orr(t, t, t2);
|
||||
cmpdi(CCR0, t, 0);
|
||||
beq(CCR0, unlocked); // If so we are done.
|
||||
|
||||
// Check if there is a successor.
|
||||
ld(t, in_bytes(ObjectMonitor::succ_offset()), monitor);
|
||||
cmpdi(CCR0, t, 0);
|
||||
bne(CCR0, set_eq_unlocked); // If so we are done.
|
||||
|
||||
// Save the monitor pointer in the current thread, so we can try
|
||||
// to reacquire the lock in SharedRuntime::monitor_exit_helper().
|
||||
std(monitor, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread);
|
||||
|
||||
crxor(CCR0, Assembler::equal, CCR0, Assembler::equal); // Set flag = NE => slow path
|
||||
b(slow_path);
|
||||
|
||||
bind(set_eq_unlocked);
|
||||
crorc(CCR0, Assembler::equal, CCR0, Assembler::equal); // Set flag = EQ => fast path
|
||||
}
|
||||
|
||||
bind(unlocked);
|
||||
@ -3013,13 +3094,13 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
|
||||
#ifdef ASSERT
|
||||
// Check that unlocked label is reached with flag == EQ.
|
||||
Label flag_correct;
|
||||
beq(flag, flag_correct);
|
||||
beq(CCR0, flag_correct);
|
||||
stop("Fast Lock Flag != EQ");
|
||||
#endif
|
||||
bind(slow_path);
|
||||
#ifdef ASSERT
|
||||
// Check that slow_path label is reached with flag == NE.
|
||||
bne(flag, flag_correct);
|
||||
bne(CCR0, flag_correct);
|
||||
stop("Fast Lock Flag != NE");
|
||||
bind(flag_correct);
|
||||
#endif
|
||||
@ -4648,15 +4729,21 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
|
||||
//
|
||||
// - obj: the object to be locked
|
||||
// - t1, t2: temporary register
|
||||
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Label& slow) {
|
||||
void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(obj, t1, t2);
|
||||
assert_different_registers(box, obj, t1, t2);
|
||||
|
||||
Label push;
|
||||
const Register top = t1;
|
||||
const Register mark = t2;
|
||||
const Register t = R0;
|
||||
|
||||
if (UseObjectMonitorTable) {
|
||||
// Clear cache in case fast locking succeeds.
|
||||
li(t, 0);
|
||||
std(t, in_bytes(BasicObjectLock::lock_offset()) + BasicLock::object_monitor_cache_offset_in_bytes(), box);
|
||||
}
|
||||
|
||||
// Check if the lock-stack is full.
|
||||
lwz(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
|
||||
cmplwi(CCR0, top, LockStack::end_offset());
|
||||
@ -4677,7 +4764,7 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, La
|
||||
andi_(t, t, markWord::lock_mask_in_place);
|
||||
bne(CCR0, slow);
|
||||
|
||||
// Try to lock. Transition lock bits 0b00 => 0b01
|
||||
// Try to lock. Transition lock bits 0b01 => 0b00
|
||||
atomically_flip_locked_state(/* is_unlock */ false, obj, mark, slow, MacroAssembler::MemBarAcq);
|
||||
|
||||
bind(push);
|
||||
|
||||
@ -359,7 +359,7 @@ class MacroAssembler: public Assembler {
|
||||
address call_c(Register function_entry);
|
||||
// For tail calls: only branch, don't link, so callee returns to caller of this function.
|
||||
address call_c_and_return_to_caller(Register function_entry);
|
||||
address call_c(address function_entry, relocInfo::relocType rt);
|
||||
address call_c(address function_entry, relocInfo::relocType rt = relocInfo::none);
|
||||
#else
|
||||
// Call a C function via a function descriptor and use full C
|
||||
// calling conventions. Updates and returns _last_calls_return_pc.
|
||||
@ -367,6 +367,9 @@ class MacroAssembler: public Assembler {
|
||||
// For tail calls: only branch, don't link, so callee returns to caller of this function.
|
||||
address call_c_and_return_to_caller(Register function_descriptor);
|
||||
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
|
||||
address call_c(address function_entry, relocInfo::relocType rt = relocInfo::none) {
|
||||
return call_c((const FunctionDescriptor*)function_entry, rt);
|
||||
}
|
||||
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
|
||||
Register toc);
|
||||
#endif
|
||||
@ -651,7 +654,7 @@ class MacroAssembler: public Assembler {
|
||||
void inc_held_monitor_count(Register tmp);
|
||||
void dec_held_monitor_count(Register tmp);
|
||||
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
|
||||
void lightweight_lock(Register obj, Register t1, Register t2, Label& slow);
|
||||
void lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
|
||||
void lightweight_unlock(Register obj, Register t1, Label& slow);
|
||||
|
||||
// allocation (for C1)
|
||||
@ -672,11 +675,11 @@ class MacroAssembler: public Assembler {
|
||||
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register tmp1,
|
||||
Register tmp2, Register tmp3);
|
||||
void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
void compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register oop, Register tmp1,
|
||||
Register tmp2, Register tmp3);
|
||||
void compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
// Check if safepoint requested and if so branch
|
||||
void safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod);
|
||||
|
||||
@ -1000,6 +1000,10 @@ int MachNode::compute_padding(int current_offset) const {
|
||||
|
||||
// Should the matcher clone input 'm' of node 'n'?
|
||||
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
|
||||
if (is_encode_and_store_pattern(n, m)) {
|
||||
mstack.push(m, Visit);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -2150,10 +2154,6 @@ const RegMask* Matcher::predicate_reg_mask(void) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Vector calling convention not yet implemented.
|
||||
bool Matcher::supports_vector_calling_convention(void) {
|
||||
return false;
|
||||
@ -5407,7 +5407,7 @@ instruct loadRange(iRegIdst dst, memory mem) %{
|
||||
// Load Compressed Pointer
|
||||
instruct loadN(iRegNdst dst, memory mem) %{
|
||||
match(Set dst (LoadN mem));
|
||||
predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
|
||||
predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "LWZ $dst, $mem \t// load compressed ptr" %}
|
||||
@ -5419,6 +5419,7 @@ instruct loadN(iRegNdst dst, memory mem) %{
|
||||
// Load Compressed Pointer acquire.
|
||||
instruct loadN_ac(iRegNdst dst, memory mem) %{
|
||||
match(Set dst (LoadN mem));
|
||||
predicate(n->as_Load()->barrier_data() == 0);
|
||||
ins_cost(3*MEMORY_REF_COST);
|
||||
|
||||
format %{ "LWZ $dst, $mem \t// load acquire compressed ptr\n\t"
|
||||
@ -5432,7 +5433,7 @@ instruct loadN_ac(iRegNdst dst, memory mem) %{
|
||||
// Load Compressed Pointer and decode it if narrow_oop_shift == 0.
|
||||
instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
|
||||
match(Set dst (DecodeN (LoadN mem)));
|
||||
predicate(_kids[0]->_leaf->as_Load()->is_unordered() && CompressedOops::shift() == 0);
|
||||
predicate(_kids[0]->_leaf->as_Load()->is_unordered() && CompressedOops::shift() == 0 && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "LWZ $dst, $mem \t// DecodeN (unscaled)" %}
|
||||
@ -6423,6 +6424,7 @@ instruct reinterpretX(vecX dst) %{
|
||||
// Store Compressed Oop
|
||||
instruct storeN(memory dst, iRegN_P2N src) %{
|
||||
match(Set dst (StoreN dst src));
|
||||
predicate(n->as_Store()->barrier_data() == 0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "STW $src, $dst \t// compressed oop" %}
|
||||
@ -6476,23 +6478,6 @@ instruct storeD(memory mem, regD src) %{
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
//----------Store Instructions With Zeros--------------------------------------
|
||||
|
||||
instruct storeCM(memory mem, immI_0 zero) %{
|
||||
match(Set mem (StoreCM mem zero));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "STB #0, $mem \t// CMS card-mark byte store" %}
|
||||
size(8);
|
||||
ins_encode %{
|
||||
__ li(R0, 0);
|
||||
// No release barrier: Oops are allowed to get visible after marking.
|
||||
guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
|
||||
__ stb(R0, $mem$$disp, $mem$$base$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
// Convert oop pointer into compressed form.
|
||||
|
||||
// Nodes for postalloc expand.
|
||||
@ -6598,7 +6583,7 @@ instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
|
||||
instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
|
||||
match(Set dst (EncodeP src));
|
||||
predicate(CompressedOops::shift() != 0 &&
|
||||
CompressedOops::base() ==0);
|
||||
CompressedOops::base() == nullptr);
|
||||
|
||||
format %{ "SRDI $dst, $src, #3 \t// encodeP, $src != nullptr" %}
|
||||
size(4);
|
||||
@ -6695,7 +6680,7 @@ instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
|
||||
predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
|
||||
n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
|
||||
CompressedOops::shift() != 0 &&
|
||||
CompressedOops::base() != 0);
|
||||
CompressedOops::base() != nullptr);
|
||||
ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
|
||||
effect(TEMP crx);
|
||||
|
||||
@ -6707,7 +6692,7 @@ instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
|
||||
instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
|
||||
match(Set dst (DecodeN src));
|
||||
predicate(CompressedOops::shift() != 0 &&
|
||||
CompressedOops::base() == 0);
|
||||
CompressedOops::base() == nullptr);
|
||||
|
||||
format %{ "SLDI $dst, $src, #3 \t// DecodeN (zerobased)" %}
|
||||
size(4);
|
||||
@ -6825,7 +6810,7 @@ instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
|
||||
predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
|
||||
n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
|
||||
CompressedOops::shift() != 0 &&
|
||||
CompressedOops::base() != 0);
|
||||
CompressedOops::base() != nullptr);
|
||||
ins_cost(2 * DEFAULT_COST);
|
||||
|
||||
format %{ "DecodeN $dst, $src \t// $src != nullptr, postalloc expanded" %}
|
||||
@ -7477,6 +7462,7 @@ instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
|
||||
|
||||
instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
@ -7676,7 +7662,7 @@ instruct weakCompareAndSwapI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
|
||||
instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
|
||||
predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
|
||||
predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
@ -7690,7 +7676,7 @@ instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
|
||||
instruct weakCompareAndSwapN_acq_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
|
||||
predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
|
||||
predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
@ -7939,7 +7925,7 @@ instruct compareAndExchangeI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
|
||||
instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
|
||||
predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
|
||||
predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as narrow oop" %}
|
||||
ins_encode %{
|
||||
@ -7953,7 +7939,7 @@ instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iReg
|
||||
|
||||
instruct compareAndExchangeN_acq_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
|
||||
predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
|
||||
predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as narrow oop" %}
|
||||
ins_encode %{
|
||||
@ -8262,6 +8248,7 @@ instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src, flagsRegCR0 cr
|
||||
|
||||
instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src, flagsRegCR0 cr0) %{
|
||||
match(Set res (GetAndSetN mem_ptr src));
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "GetAndSetN $res, $mem_ptr, $src" %}
|
||||
ins_encode %{
|
||||
@ -12106,10 +12093,10 @@ instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
|
||||
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR1 cr1) %{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||
match(Set crx (FastLock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2);
|
||||
effect(TEMP tmp1, TEMP tmp2, KILL cr1);
|
||||
|
||||
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
|
||||
ins_encode %{
|
||||
|
||||
@ -27,6 +27,7 @@
|
||||
#define CPU_PPC_REGISTER_PPC_HPP
|
||||
|
||||
#include "asm/register.hpp"
|
||||
#include "utilities/count_trailing_zeros.hpp"
|
||||
|
||||
// forward declaration
|
||||
class VMRegImpl;
|
||||
@ -555,4 +556,12 @@ constexpr Register R29_TOC = R29;
|
||||
constexpr Register R11_scratch1 = R11;
|
||||
constexpr Register R12_scratch2 = R12;
|
||||
|
||||
template <>
|
||||
inline Register AbstractRegSet<Register>::first() {
|
||||
if (_bitset == 0) { return noreg; }
|
||||
return as_Register(count_trailing_zeros(_bitset));
|
||||
}
|
||||
|
||||
typedef AbstractRegSet<Register> RegSet;
|
||||
|
||||
#endif // CPU_PPC_REGISTER_PPC_HPP
|
||||
|
||||
@ -99,12 +99,7 @@ void OptoRuntime::generate_exception_blob() {
|
||||
__ set_last_Java_frame(/*sp=*/R1_SP, noreg);
|
||||
|
||||
__ mr(R3_ARG1, R16_thread);
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c((address) OptoRuntime::handle_exception_C, relocInfo::none);
|
||||
#else
|
||||
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
|
||||
relocInfo::none);
|
||||
#endif
|
||||
__ call_c((address) OptoRuntime::handle_exception_C);
|
||||
address calls_return_pc = __ last_calls_return_pc();
|
||||
# ifdef ASSERT
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
|
||||
@ -2399,7 +2399,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Try fastpath for locking.
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
__ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3);
|
||||
__ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
} else {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
__ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
@ -2444,12 +2444,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
|
||||
// The JNI call
|
||||
// --------------------------------------------------------------------------
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c(native_func, relocInfo::runtime_call_type);
|
||||
#else
|
||||
FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
|
||||
__ call_c(fd_native_method, relocInfo::runtime_call_type);
|
||||
#endif
|
||||
|
||||
|
||||
// Now, we are back from the native code.
|
||||
@ -2610,7 +2605,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
|
||||
// Try fastpath for unlocking.
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
__ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_temp_1, r_temp_2, r_temp_3);
|
||||
__ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
} else {
|
||||
__ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
}
|
||||
@ -2861,7 +2856,8 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// Allocate space for the code
|
||||
ResourceMark rm;
|
||||
// Setup code generation tools
|
||||
CodeBuffer buffer("deopt_blob", 2048, 1024);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
|
||||
CodeBuffer buffer(name, 2048, 1024);
|
||||
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
|
||||
Label exec_mode_initialized;
|
||||
int frame_size_in_words;
|
||||
@ -3211,23 +3207,25 @@ void OptoRuntime::generate_uncommon_trap_blob() {
|
||||
#endif // COMPILER2
|
||||
|
||||
// Generate a special Compile2Runtime blob that saves all registers, and setup oopmap.
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
|
||||
SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
|
||||
assert(StubRoutines::forward_exception_entry() != nullptr,
|
||||
"must be generated before");
|
||||
assert(is_polling_page_id(id), "expected a polling page stub id");
|
||||
|
||||
ResourceMark rm;
|
||||
OopMapSet *oop_maps = new OopMapSet();
|
||||
OopMap* map;
|
||||
|
||||
// Allocate space for the code. Setup code generation tools.
|
||||
CodeBuffer buffer("handler_blob", 2048, 1024);
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 2048, 1024);
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
|
||||
address start = __ pc();
|
||||
int frame_size_in_bytes = 0;
|
||||
|
||||
RegisterSaver::ReturnPCLocation return_pc_location;
|
||||
bool cause_return = (poll_type == POLL_AT_RETURN);
|
||||
bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
|
||||
if (cause_return) {
|
||||
// Nothing to do here. The frame has already been popped in MachEpilogNode.
|
||||
// Register LR already contains the return pc.
|
||||
@ -3237,7 +3235,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
|
||||
return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
|
||||
}
|
||||
|
||||
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
|
||||
bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id);
|
||||
|
||||
// Save registers, fpu state, and flags. Set R31 = return pc.
|
||||
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
|
||||
@ -3324,11 +3322,13 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
|
||||
// but since this is generic code we don't know what they are and the caller
|
||||
// must do any gc of the args.
|
||||
//
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
|
||||
RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
|
||||
assert(is_resolve_id(id), "expected a resolve stub id");
|
||||
|
||||
// allocate space for the code
|
||||
ResourceMark rm;
|
||||
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
CodeBuffer buffer(name, 1000, 512);
|
||||
MacroAssembler* masm = new MacroAssembler(&buffer);
|
||||
|
||||
@ -3426,7 +3426,11 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
|
||||
// Note: the routine set_pc_not_at_call_for_caller in
|
||||
// SharedRuntime.cpp requires that this code be generated into a
|
||||
// RuntimeStub.
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
|
||||
RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
|
||||
assert(is_throw_id(id), "expected a throw stub id");
|
||||
|
||||
const char* name = SharedRuntime::stub_name(id);
|
||||
|
||||
ResourceMark rm;
|
||||
const char* timer_msg = "SharedRuntime generate_throw_exception";
|
||||
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
|
||||
@ -3455,11 +3459,7 @@ RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address r
|
||||
__ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
|
||||
|
||||
__ mr(R3_ARG1, R16_thread);
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c(runtime_entry, relocInfo::none);
|
||||
#else
|
||||
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
|
||||
#endif
|
||||
__ call_c(runtime_entry);
|
||||
|
||||
// Set an oopmap for the call site.
|
||||
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
|
||||
@ -3749,7 +3749,8 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
|
||||
// It returns a jobject handle to the event writer.
|
||||
// The handle is dereferenced and the return value is the event writer oop.
|
||||
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
CodeBuffer code("jfr_write_checkpoint", 512, 64);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
|
||||
CodeBuffer code(name, 512, 64);
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
Register tmp1 = R10_ARG8;
|
||||
@ -3777,8 +3778,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
oop_maps->add_gc_map(calls_return_pc - start, map);
|
||||
|
||||
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
|
||||
RuntimeStub::new_runtime_stub(code.name(),
|
||||
&code, frame_complete,
|
||||
RuntimeStub::new_runtime_stub(name, &code, frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
oop_maps, false);
|
||||
return stub;
|
||||
@ -3786,7 +3786,8 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
|
||||
|
||||
// For c2: call to return a leased buffer.
|
||||
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
CodeBuffer code("jfr_return_lease", 512, 64);
|
||||
const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
|
||||
CodeBuffer code(name, 512, 64);
|
||||
MacroAssembler* masm = new MacroAssembler(&code);
|
||||
|
||||
Register tmp1 = R10_ARG8;
|
||||
@ -3812,8 +3813,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
|
||||
oop_maps->add_gc_map(calls_return_pc - start, map);
|
||||
|
||||
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
|
||||
RuntimeStub::new_runtime_stub(code.name(),
|
||||
&code, frame_complete,
|
||||
RuntimeStub::new_runtime_stub(name, &code, frame_complete,
|
||||
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
|
||||
oop_maps, false);
|
||||
return stub;
|
||||
|
||||
@ -4587,6 +4587,30 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
||||
return start;
|
||||
}
|
||||
|
||||
// load Method* target of MethodHandle
|
||||
// R3_ARG1 = jobject receiver
|
||||
// R19_method = result Method*
|
||||
address generate_upcall_stub_load_target() {
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target");
|
||||
address start = __ pc();
|
||||
|
||||
__ resolve_global_jobject(R3_ARG1, R22_tmp2, R23_tmp3, MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS);
|
||||
// Load target method from receiver
|
||||
__ load_heap_oop(R19_method, java_lang_invoke_MethodHandle::form_offset(), R3_ARG1,
|
||||
R22_tmp2, R23_tmp3, MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS, IS_NOT_NULL);
|
||||
__ load_heap_oop(R19_method, java_lang_invoke_LambdaForm::vmentry_offset(), R19_method,
|
||||
R22_tmp2, R23_tmp3, MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS, IS_NOT_NULL);
|
||||
__ load_heap_oop(R19_method, java_lang_invoke_MemberName::method_offset(), R19_method,
|
||||
R22_tmp2, R23_tmp3, MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS, IS_NOT_NULL);
|
||||
__ ld(R19_method, java_lang_invoke_ResolvedMethodName::vmtarget_offset(), R19_method);
|
||||
__ std(R19_method, in_bytes(JavaThread::callee_target_offset()), R16_thread); // just in case callee is deoptimized
|
||||
|
||||
__ blr();
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
// Initialization
|
||||
void generate_initial_stubs() {
|
||||
// Generates all stubs and initializes the entry points
|
||||
@ -4651,6 +4675,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
||||
}
|
||||
|
||||
StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
|
||||
StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
|
||||
}
|
||||
|
||||
void generate_compiler_stubs() {
|
||||
|
||||
@ -1078,6 +1078,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
case Interpreter::java_lang_math_sin : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); break;
|
||||
case Interpreter::java_lang_math_cos : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); break;
|
||||
case Interpreter::java_lang_math_tan : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); break;
|
||||
case Interpreter::java_lang_math_tanh : /* run interpreted */ break;
|
||||
case Interpreter::java_lang_math_abs : /* run interpreted */ break;
|
||||
case Interpreter::java_lang_math_sqrt : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt); break;
|
||||
case Interpreter::java_lang_math_log : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); break;
|
||||
@ -1464,13 +1465,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// native result across the call. No oop is present.
|
||||
|
||||
__ mr(R3_ARG1, R16_thread);
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
|
||||
relocInfo::none);
|
||||
#else
|
||||
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
|
||||
relocInfo::none);
|
||||
#endif
|
||||
__ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
|
||||
|
||||
__ bind(sync_check_done);
|
||||
|
||||
|
||||
@ -2130,8 +2130,8 @@ void TemplateTable::_return(TosState state) {
|
||||
|
||||
// Load klass of this obj.
|
||||
__ load_klass(Rklass, R17_tos);
|
||||
__ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
|
||||
__ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
|
||||
__ lbz(Rklass_flags, in_bytes(Klass::misc_flags_offset()), Rklass);
|
||||
__ testbitdi(CCR0, R0, Rklass_flags, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ bfalse(CCR0, Lskip_register_finalizer);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/upcallLinker.hpp"
|
||||
@ -118,7 +119,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr
|
||||
static const int upcall_stub_code_base_size = 1024;
|
||||
static const int upcall_stub_size_per_arg = 16; // arg save & restore + move
|
||||
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
address UpcallLinker::make_upcall_stub(jobject receiver, Symbol* signature,
|
||||
BasicType* out_sig_bt, int total_out_args,
|
||||
BasicType ret_type,
|
||||
jobject jabi, jobject jconv,
|
||||
@ -221,7 +222,6 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
__ block_comment("{ on_entry");
|
||||
__ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_entry), R0);
|
||||
__ addi(R3_ARG1, R1_SP, frame_data_offset);
|
||||
__ load_const_optimized(R4_ARG2, (intptr_t)receiver, R0);
|
||||
__ call_c(call_target_address);
|
||||
__ mr(R16_thread, R3_RET);
|
||||
__ block_comment("} on_entry");
|
||||
@ -236,12 +236,12 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
arg_shuffle.generate(_masm, as_VMStorage(callerSP), frame::native_abi_minframe_size, frame::jit_out_preserve_size);
|
||||
__ block_comment("} argument shuffle");
|
||||
|
||||
__ block_comment("{ receiver ");
|
||||
__ get_vm_result(R3_ARG1);
|
||||
__ block_comment("} receiver ");
|
||||
|
||||
__ load_const_optimized(R19_method, (intptr_t)entry);
|
||||
__ std(R19_method, in_bytes(JavaThread::callee_target_offset()), R16_thread);
|
||||
__ block_comment("{ load target ");
|
||||
__ load_const_optimized(call_target_address, StubRoutines::upcall_stub_load_target(), R0);
|
||||
__ load_const_optimized(R3_ARG1, (intptr_t)receiver, R0);
|
||||
__ mtctr(call_target_address);
|
||||
__ bctrl(); // loads target Method* into R19_method
|
||||
__ block_comment("} load target ");
|
||||
|
||||
__ push_cont_fastpath();
|
||||
|
||||
@ -326,7 +326,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
|
||||
#ifndef PRODUCT
|
||||
stringStream ss;
|
||||
ss.print("upcall_stub_%s", entry->signature()->as_C_string());
|
||||
ss.print("upcall_stub_%s", signature->as_C_string());
|
||||
const char* name = _masm->code_string(ss.as_string());
|
||||
#else // PRODUCT
|
||||
const char* name = "upcall_stub";
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user