Merge branch 'master' of github.com:openjdk/jdk into JDK-8362268
commit 9b71e677d0
.github/workflows/main.yml (vendored, 12 lines changed)
@@ -327,8 +327,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-x64
runs-on: 'macos-13'
xcode-toolset-version: '14.3.1'
runs-on: 'macos-15-intel'
xcode-toolset-version: '16.4'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}

@@ -340,8 +340,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-aarch64
runs-on: 'macos-14'
xcode-toolset-version: '15.4'
runs-on: 'macos-15'
xcode-toolset-version: '16.4'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}

@@ -432,9 +432,9 @@ jobs:
with:
platform: macos-aarch64
bootjdk-platform: macos-aarch64
runs-on: macos-14
runs-on: macos-15
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
xcode-toolset-version: '15.4'
xcode-toolset-version: '16.4'
debug-suffix: -debug

test-windows-x64:
@@ -116,6 +116,9 @@ else ifeq ($(call isTargetOs, aix), true)
$(eval STATIC_LIB_EXPORT_FILES += $(lib).exp) \
)
STATIC_LIBS := -Wl,-bexpfull $(STATIC_LIB_FILES) $(addprefix -Wl$(COMMA)-bE:, $(STATIC_LIB_EXPORT_FILES))
ifeq ($(DEBUG_LEVEL), slowdebug)
STATIC_LIBS += -Wl,-bbigtoc
endif
else
$(error Unsupported platform)
endif
@@ -63,7 +63,7 @@ TOOL_GENERATECURRENCYDATA = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_
TOOL_TZDB = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.tzdb.TzdbZoneRulesCompiler

TOOL_BLOCKED_CERTS = $(JAVA_SMALL) -Xlog:disable -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
TOOL_BLOCKED_CERTS = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
--add-exports java.base/sun.security.util=ALL-UNNAMED \
build.tools.blockedcertsconverter.BlockedCertsConverter
@@ -363,7 +363,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],

# Check if it's a GNU date compatible version
AC_MSG_CHECKING([if date is a GNU compatible version])
check_date=`$DATE --version 2>&1 | $GREP "GNU\|BusyBox"`
check_date=`$DATE --version 2>&1 | $GREP "GNU\|BusyBox\|uutils"`
if test "x$check_date" != x; then
AC_MSG_RESULT([yes])
IS_GNU_DATE=yes
@@ -408,27 +408,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
AC_MSG_CHECKING([if Boot JDK is 32 or 64 bits])
AC_MSG_RESULT([$BOOT_JDK_BITS])

# Try to enable CDS
AC_MSG_CHECKING([for local Boot JDK Class Data Sharing (CDS)])
BOOT_JDK_CDS_ARCHIVE=$CONFIGURESUPPORT_OUTPUTDIR/classes.jsa
UTIL_ADD_JVM_ARG_IF_OK([-XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE],boot_jdk_cds_args,[$JAVA])

if test "x$boot_jdk_cds_args" != x; then
# Try creating a CDS archive
$JAVA $boot_jdk_cds_args -Xshare:dump > /dev/null 2>&1
if test $? -eq 0; then
BOOTJDK_USE_LOCAL_CDS=true
AC_MSG_RESULT([yes, created])
else
# Generation failed, don't use CDS.
BOOTJDK_USE_LOCAL_CDS=false
AC_MSG_RESULT([no, creation failed])
fi
else
BOOTJDK_USE_LOCAL_CDS=false
AC_MSG_RESULT([no, -XX:SharedArchiveFile not supported])
fi

BOOTJDK_SETUP_CLASSPATH
])

@@ -444,13 +423,8 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Force en-US environment
UTIL_ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])

if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
# Use our own CDS archive
UTIL_ADD_JVM_ARG_IF_OK([$boot_jdk_cds_args -Xshare:auto],boot_jdk_jvmargs,[$JAVA])
else
# Otherwise optimistically use the system-wide one, if one is present
UTIL_ADD_JVM_ARG_IF_OK([-Xshare:auto],boot_jdk_jvmargs,[$JAVA])
fi
UTIL_ADD_JVM_ARG_IF_OK([-Xlog:all=off:stdout],boot_jdk_jvmargs,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-Xlog:all=warning:stderr],boot_jdk_jvmargs,[$JAVA])

# Finally append user provided options to allow them to override.
UTIL_ADD_JVM_ARG_IF_OK([$USER_BOOT_JDK_OPTIONS],boot_jdk_jvmargs,[$JAVA])
@@ -44,7 +44,3 @@ JAVAC_CMD := $(FIXPATH) $(BOOT_JDK)/bin/javac
JAR_CMD := $(FIXPATH) $(BOOT_JDK)/bin/jar
# The bootcycle JVM arguments may differ from the original boot jdk.
JAVA_FLAGS_BIG := @BOOTCYCLE_JVM_ARGS_BIG@
# Any CDS settings generated for the bootjdk are invalid in the bootcycle build.
# By filtering out those JVM args, the bootcycle JVM will use its default
# settings for CDS.
JAVA_FLAGS := $(filter-out -XX:SharedArchiveFile% -Xshare%, $(JAVA_FLAGS))
@@ -28,7 +28,7 @@
################################################################################

# Minimum supported versions
JTREG_MINIMUM_VERSION=8
JTREG_MINIMUM_VERSION=8.1
GTEST_MINIMUM_VERSION=1.14.0

################################################################################
@@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)

GTEST_VERSION=1.14.0
JTREG_VERSION=8+2
JTREG_VERSION=8.1+1

LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
version: "8",
build_number: "2",
file: "bundles/jtreg-8+2.zip",
version: "8.1",
build_number: "1",
file: "bundles/jtreg-8.1+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@@ -32,11 +32,6 @@
547d 92ca
53da 9b7e
446e f86f
#
# we should use this one instead of the 4260<-ff0d
#4260 2212
4260 ff0d
#
426A 00A6
43A1 301C
444A 2014
@@ -25,13 +25,6 @@
# 4260 <--> 2212
# 426A <--> 00A6
#
# Warning:
# "our old" implementation seems agree with above "new" mappings
# except the entries 4260 <-> 2212. To keep the "compatbility"
# with the "old" implementation, I changed the entries "temporarily"
# 4260 <-> 2212
# 4260 <- ff0d
#
00 0000
01 0001
02 0002

@@ -407,8 +400,7 @@ FF 009F
425D FF09
425E FF1B
425F FFE2
#4260 FF0D
4260 2212
4260 FF0D
4261 FF0F
426A FFE4
426B FF0C
@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -57,61 +57,61 @@

COMMA := ,

os := $(shell uname -o)
cpu := $(shell uname -p)
OS := $(shell uname -o)
CPU := $(shell uname -m)

# Figure out what platform this is building on.
me := $(cpu)-$(if $(findstring Linux,$(os)),linux-gnu)
ME := $(CPU)-$(if $(findstring Linux,$(OS)),linux-gnu)

$(info Building on platform $(me))
$(info Building on platform $(ME))

#
# By default just build for the current platform, which is assumed to be Linux
#
ifeq ($(TARGETS), )
platforms := $(me)
host_platforms := $(platforms)
PLATFORMS := $(ME)
HOST_PLATFORMS := $(PLATFORMS)
else
platforms := $(subst $(COMMA), , $(TARGETS))
host_platforms := $(me)
PLATFORMS := $(subst $(COMMA), , $(TARGETS))
HOST_PLATFORMS := $(ME)
endif
target_platforms := $(platforms)
$(info host_platforms $(host_platforms))
$(info target_platforms $(target_platforms))
TARGET_PLATFORMS := $(PLATFORMS)
$(info HOST_PLATFORMS $(HOST_PLATFORMS))
$(info TARGET_PLATFORMS $(TARGET_PLATFORMS))

all compile : $(platforms)
all compile : $(PLATFORMS)

ifeq ($(SKIP_ME), )
$(foreach p,$(filter-out $(me),$(platforms)),$(eval $(p) : $$(me)))
$(foreach p,$(filter-out $(ME),$(PLATFORMS)),$(eval $(p) : $$(ME)))
endif

OUTPUT_ROOT = $(abspath ../../build/devkit)
RESULT = $(OUTPUT_ROOT)/result

submakevars = HOST=$@ BUILD=$(me) RESULT=$(RESULT) OUTPUT_ROOT=$(OUTPUT_ROOT)
SUBMAKEVARS = HOST=$@ BUILD=$(ME) RESULT=$(RESULT) OUTPUT_ROOT=$(OUTPUT_ROOT)

$(host_platforms) :
$(HOST_PLATFORMS) :
@echo 'Building compilers for $@'
@echo 'Targets: $(target_platforms)'
for p in $(filter $@, $(target_platforms)) $(filter-out $@, $(target_platforms)); do \
$(MAKE) -f Tools.gmk download-rpms $(submakevars) \
@echo 'Targets: $(TARGET_PLATFORMS)'
for p in $(filter $@, $(TARGET_PLATFORMS)) $(filter-out $@, $(TARGET_PLATFORMS)); do \
$(MAKE) -f Tools.gmk download-rpms $(SUBMAKEVARS) \
TARGET=$$p PREFIX=$(RESULT)/$@-to-$$p && \
$(MAKE) -f Tools.gmk all $(submakevars) \
$(MAKE) -f Tools.gmk all $(SUBMAKEVARS) \
TARGET=$$p PREFIX=$(RESULT)/$@-to-$$p && \
$(MAKE) -f Tools.gmk ccache $(submakevars) \
$(MAKE) -f Tools.gmk ccache $(SUBMAKEVARS) \
TARGET=$@ PREFIX=$(RESULT)/$@-to-$$p || exit 1 ; \
done
@echo 'All done"'

today := $(shell date +%Y%m%d)
TODAY := $(shell date +%Y%m%d)

define Mktar
$(1)-to-$(2)_tar = $$(RESULT)/sdk-$(1)-to-$(2)-$$(today).tar.gz
$(1)-to-$(2)_tar = $$(RESULT)/sdk-$(1)-to-$(2)-$$(TODAY).tar.gz
$$($(1)-to-$(2)_tar) : PLATFORM = $(1)-to-$(2)
TARFILES += $$($(1)-to-$(2)_tar)
endef

$(foreach p,$(host_platforms),$(foreach t,$(target_platforms),$(eval $(call Mktar,$(p),$(t)))))
$(foreach p,$(HOST_PLATFORMS),$(foreach t,$(TARGET_PLATFORMS),$(eval $(call Mktar,$(p),$(t)))))

tars : all $(TARFILES)
onlytars : $(TARFILES)
@@ -119,9 +119,9 @@ onlytars : $(TARFILES)
$(MAKE) -r -f Tars.gmk SRC_DIR=$(RESULT)/$(PLATFORM) TAR_FILE=$@

clean :
rm -rf $(addprefix ../../build/devkit/, result $(host_platforms))
rm -rf $(addprefix ../../build/devkit/, result $(HOST_PLATFORMS))
dist-clean: clean
rm -rf $(addprefix ../../build/devkit/, src download)

FORCE :
.PHONY : all compile tars $(configs) $(host_platforms) clean dist-clean
.PHONY : all compile tars $(HOST_PLATFORMS) clean dist-clean
@@ -39,7 +39,7 @@
# Fix this...
#

uppercase = $(shell echo $1 | tr a-z A-Z)
lowercase = $(shell echo $1 | tr A-Z a-z)

$(info TARGET=$(TARGET))
$(info HOST=$(HOST))
@@ -104,26 +104,48 @@ endif
################################################################################
# Define external dependencies

gcc_ver_only := 14.2.0
binutils_ver_only := 2.43
ccache_ver_only := 4.10.2
GNU_BASE_URL := https://ftp.gnu.org/pub/gnu

BINUTILS_VER_ONLY := 2.43
BINUTILS_BASE_URL := $(GNU_BASE_URL)/binutils
BINUTILS_SHA512 := 93e063163e54d6a6ee2bd48dc754270bf757a3635b49a702ed6b310e929e94063958512d191e66beaf44275f7ea60865dbde138b624626739679fcc306b133bb

CCACHE_VER_ONLY := 4.10.2
CCACHE_BASE_URL := https://github.com/ccache/ccache/releases/download
CCACHE_CMAKE_BASED := 1
mpfr_ver_only := 4.2.1
gmp_ver_only := 6.3.0
mpc_ver_only := 1.3.1
gdb_ver_only := 15.2
CCACHE_SHA512 := 3815c71d7266c32839acb306763268018acc58b3bbbd9ec79fc101e4217c1720d2ad2f01645bf69168c1c61d27700b6f3bb755cfa82689cca69824f015653f3c

dependencies := gcc binutils ccache mpfr gmp mpc gdb
GCC_VER_ONLY := 14.2.0
GCC_BASE_URL := $(GNU_BASE_URL)/gcc
GCC_SHA512 := 932bdef0cda94bacedf452ab17f103c0cb511ff2cec55e9112fc0328cbf1d803b42595728ea7b200e0a057c03e85626f937012e49a7515bc5dd256b2bf4bc396

$(foreach dep,$(dependencies),$(eval $(dep)_ver := $(dep)-$($(dep)_ver_only)))
GDB_VER_ONLY := 15.2
GDB_BASE_URL := $(GNU_BASE_URL)/gdb
GDB_SHA512 := 624007deceb5b15ba89c0725883d1a699fa46714ef30887f3d0165e17c5d65d634671740a135aa69e437d916218abb08cfa2a38ed309ff19d48f51da56b2a8ba

GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.xz
BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.gz
CCACHE := https://github.com/ccache/ccache/releases/download/v$(ccache_ver_only)/$(ccache_ver).tar.xz
MPFR := https://www.mpfr.org/$(mpfr_ver)/$(mpfr_ver).tar.bz2
GMP := http://ftp.gnu.org/pub/gnu/gmp/$(gmp_ver).tar.bz2
MPC := http://ftp.gnu.org/pub/gnu/mpc/$(mpc_ver).tar.gz
GDB := http://ftp.gnu.org/gnu/gdb/$(gdb_ver).tar.xz
GMP_VER_ONLY := 6.3.0
GMP_BASE_URL := $(GNU_BASE_URL)/gmp
GMP_SHA512 := e85a0dab5195889948a3462189f0e0598d331d3457612e2d3350799dba2e244316d256f8161df5219538eb003e4b5343f989aaa00f96321559063ed8c8f29fd2

MPC_VER_ONLY := 1.3.1
MPC_BASE_URL := $(GNU_BASE_URL)/mpc
MPC_SHA512 := 4bab4ef6076f8c5dfdc99d810b51108ced61ea2942ba0c1c932d624360a5473df20d32b300fc76f2ba4aa2a97e1f275c9fd494a1ba9f07c4cb2ad7ceaeb1ae97

MPFR_VER_ONLY := 4.2.1
MPFR_BASE_URL := https://www.mpfr.org
MPFR_SHA512 := bc68c0d755d5446403644833ecbb07e37360beca45f474297b5d5c40926df1efc3e2067eecffdf253f946288bcca39ca89b0613f545d46a9e767d1d4cf358475

DEPENDENCIES := BINUTILS CCACHE GCC GDB GMP MPC MPFR

$(foreach dep,$(DEPENDENCIES),$(eval $(dep)_VER := $(call lowercase,$(dep)-$($(dep)_VER_ONLY))))

BINUTILS_URL := $(BINUTILS_BASE_URL)/$(BINUTILS_VER).tar.xz
CCACHE_URL := $(CCACHE_BASE_URL)/v$(CCACHE_VER_ONLY)/$(CCACHE_VER).tar.xz
GCC_URL := $(GCC_BASE_URL)/$(GCC_VER)/$(GCC_VER).tar.xz
GDB_URL := $(GDB_BASE_URL)/$(GDB_VER).tar.xz
GMP_URL := $(GMP_BASE_URL)/$(GMP_VER).tar.xz
MPC_URL := $(MPC_BASE_URL)/$(MPC_VER).tar.gz
MPFR_URL := $(MPFR_BASE_URL)/$(MPFR_VER)/$(MPFR_VER).tar.xz

REQUIRED_MIN_MAKE_MAJOR_VERSION := 4
ifneq ($(REQUIRED_MIN_MAKE_MAJOR_VERSION),)
@@ -180,10 +202,10 @@ DOWNLOAD_RPMS := $(DOWNLOAD)/rpms/$(TARGET)-$(LINUX_VERSION)
SRCDIR := $(OUTPUT_ROOT)/src

# Marker file for unpacking rpms
rpms := $(SYSROOT)/rpms_unpacked
RPMS := $(SYSROOT)/rpms_unpacked

# Need to patch libs that are linker scripts to use non-absolute paths
libs := $(SYSROOT)/libs_patched
LIBS := $(SYSROOT)/libs_patched

################################################################################
# Download RPMs
@@ -198,10 +220,10 @@ download-rpms:
################################################################################
# Unpack source packages

# Generate downloading + unpacking of sources.
define Download
# Generate downloading + checksum verification of sources.
define DownloadVerify
# Allow override
$(1)_DIRNAME ?= $(basename $(basename $(notdir $($(1)))))
$(1)_DIRNAME ?= $(basename $(basename $(notdir $($(1)_URL))))
$(1)_DIR = $(abspath $(SRCDIR)/$$($(1)_DIRNAME))
ifeq ($$($(1)_CMAKE_BASED),)
$(1)_CFG = $$($(1)_DIR)/configure
@@ -212,7 +234,7 @@ define Download
$(1)_SRC_MARKER = $$($(1)_DIR)/CMakeLists.txt
$(1)_CONFIG = $$(CMAKE_CONFIG) $$($(1)_DIR)
endif
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)))
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)_URL))

$$($(1)_SRC_MARKER) : $$($(1)_FILE)
mkdir -p $$(SRCDIR)
@@ -224,11 +246,20 @@ define Download
touch $$@

$$($(1)_FILE) :
wget -P $(DOWNLOAD) $$($(1))
mkdir -p $$(@D)
wget -O - $$($(1)_URL) > $$@.tmp
sha512_actual="$$$$(sha512sum $$@.tmp | awk '{ print $$$$1; }')"; \
if [ x"$$$${sha512_actual}" != x"$$($(1)_SHA512)" ]; then \
echo "Checksum mismatch for $$@.tmp"; \
echo " Expected: $$($(1)_SHA512)"; \
echo " Actual: $$$${sha512_actual}"; \
exit 1; \
fi
mv $$@.tmp $$@
endef
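The new DownloadVerify recipe above downloads each archive to a .tmp file, compares its SHA-512 against the per-dependency *_SHA512 variable, and only renames the file into place when the digest matches. For readers who want the same check outside of make, a minimal sketch is below; it assumes OpenSSL's EVP API, and the file name and expected digest are hypothetical, not values from this patch.

// Minimal sketch (not part of the patch): verify a downloaded archive against
// an expected SHA-512 before accepting it, mirroring the recipe above.
#include <openssl/evp.h>
#include <cstdio>
#include <string>

static std::string sha512_hex(const char* path) {
  std::FILE* f = std::fopen(path, "rb");
  if (f == nullptr) return "";
  EVP_MD_CTX* ctx = EVP_MD_CTX_new();
  EVP_DigestInit_ex(ctx, EVP_sha512(), nullptr);
  unsigned char buf[8192];
  size_t n;
  while ((n = std::fread(buf, 1, sizeof buf, f)) > 0) {
    EVP_DigestUpdate(ctx, buf, n);
  }
  std::fclose(f);
  unsigned char digest[EVP_MAX_MD_SIZE];
  unsigned int len = 0;
  EVP_DigestFinal_ex(ctx, digest, &len);
  EVP_MD_CTX_free(ctx);
  static const char* hex = "0123456789abcdef";
  std::string out;
  for (unsigned int i = 0; i < len; i++) {
    out.push_back(hex[digest[i] >> 4]);
    out.push_back(hex[digest[i] & 0x0f]);
  }
  return out;
}

int main() {
  const std::string expected = "...";  // the *_SHA512 value for the dependency
  if (sha512_hex("binutils-2.43.tar.xz.tmp") != expected) {
    std::fprintf(stderr, "Checksum mismatch, rejecting download\n");
    return 1;
  }
  return 0;  // safe to rename the .tmp file into place
}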

# Download and unpack all source packages
$(foreach dep,$(dependencies),$(eval $(call Download,$(call uppercase,$(dep)))))
$(foreach dep,$(DEPENDENCIES),$(eval $(call DownloadVerify,$(dep))))

################################################################################
# Unpack RPMS
@@ -250,7 +281,7 @@ RPM_FILE_LIST := $(sort $(foreach a, $(RPM_ARCHS), \
# Note. For building linux you should install rpm2cpio.
define unrpm
$(SYSROOT)/$(notdir $(1)).unpacked : $(1)
$$(rpms) : $(SYSROOT)/$(notdir $(1)).unpacked
$$(RPMS) : $(SYSROOT)/$(notdir $(1)).unpacked
endef

%.unpacked :
@@ -277,7 +308,7 @@ $(foreach p,$(RPM_FILE_LIST),$(eval $(call unrpm,$(p))))
# have it anyway, but just to make sure...
# Patch libc.so and libpthread.so to force linking against libraries in sysroot
# and not the ones installed on the build machine.
$(libs) : $(rpms)
$(LIBS) : $(RPMS)
@echo Patching libc and pthreads
@(for f in `find $(SYSROOT) -name libc.so -o -name libpthread.so`; do \
(cat $$f | sed -e 's|/usr/lib64/||g' \
@@ -293,10 +324,10 @@ $(libs) : $(rpms)
# Create links for ffi header files so that they become visible by default when using the
# devkit.
ifeq ($(ARCH), x86_64)
$(SYSROOT)/usr/include/ffi.h: $(rpms)
$(SYSROOT)/usr/include/ffi.h: $(RPMS)
cd $(@D) && rm -f $(@F) && ln -s ../lib/libffi-*/include/$(@F) .

$(SYSROOT)/usr/include/ffitarget.h: $(rpms)
$(SYSROOT)/usr/include/ffitarget.h: $(RPMS)
cd $(@D) && rm -f $(@F) && ln -s ../lib/libffi-*/include/$(@F) .

SYSROOT_LINKS += $(SYSROOT)/usr/include/ffi.h $(SYSROOT)/usr/include/ffitarget.h
@@ -305,7 +336,7 @@ endif
################################################################################

# Define marker files for each source package to be compiled
$(foreach dep,$(dependencies),$(eval $(dep) = $(TARGETDIR)/$($(dep)_ver).done))
$(foreach dep,$(DEPENDENCIES),$(eval $(dep) = $(TARGETDIR)/$($(dep)_VER).done))

################################################################################
@@ -345,48 +376,48 @@ TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
# CFLAG_<name> to most likely -m32.
define mk_bfd
$$(info Libs for $(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
$$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile \
: CFLAGS += $$(CFLAGS_$(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
$$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile \
: LIBDIRS = --libdir=$(TARGETDIR)/$(1)

bfdlib += $$(TARGETDIR)/$$(binutils_ver)-$(subst /,-,$(1)).done
bfdmakes += $$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile
BFDLIB += $$(TARGETDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1)).done
BFDMAKES += $$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile
endef

# Create one set of bfds etc for each multilib arch
$(foreach l,$(LIBDIRS),$(eval $(call mk_bfd,$(l))))

# Only build these two libs.
$(bfdlib) : MAKECMD = all-libiberty all-bfd
$(bfdlib) : INSTALLCMD = install-libiberty install-bfd
$(BFDLIB) : MAKECMD = all-libiberty all-bfd
$(BFDLIB) : INSTALLCMD = install-libiberty install-bfd

# Building targets libbfd + libiberty. HOST==TARGET, i.e not
# for a cross env.
$(bfdmakes) : CONFIG = --target=$(TARGET) \
$(BFDMAKES) : CONFIG = --target=$(TARGET) \
--host=$(TARGET) --build=$(BUILD) \
--prefix=$(TARGETDIR) \
--with-sysroot=$(SYSROOT) \
$(LIBDIRS)

$(bfdmakes) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
$(BFDMAKES) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
################################################################################

$(gcc) \
$(binutils) \
$(gmp) \
$(mpfr) \
$(mpc) \
$(bfdmakes) \
$(ccache) : ENVS += $(TOOLS)
$(GCC) \
$(BINUTILS) \
$(GMP) \
$(MPFR) \
$(MPC) \
$(BFDMAKES) \
$(CCACHE) : ENVS += $(TOOLS)

# libdir to work around hateful bfd stuff installing into wrong dirs...
# ensure we have 64 bit bfd support in the HOST library. I.e our
# compiler on i686 will know 64 bit symbols, BUT later
# we build just the libs again for TARGET, then with whatever the arch
# wants.
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
$(BUILDDIR)/$(BINUTILS_VER)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))

ifeq ($(filter $(ARCH), s390x riscv64 ppc64le), )
# gold compiles but cannot link properly on s390x @ gcc 13.2 and Fedore 41
@@ -397,8 +428,8 @@ endif

# Makefile creation. Simply run configure in build dir.
# Setting CFLAGS to -O2 generates a much faster ld.
$(bfdmakes) \
$(BUILDDIR)/$(binutils_ver)/Makefile \
$(BFDMAKES) \
$(BUILDDIR)/$(BINUTILS_VER)/Makefile \
: $(BINUTILS_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@@ -417,7 +448,7 @@ $(BUILDDIR)/$(binutils_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'

$(BUILDDIR)/$(mpfr_ver)/Makefile \
$(BUILDDIR)/$(MPFR_VER)/Makefile \
: $(MPFR_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@@ -432,7 +463,7 @@ $(BUILDDIR)/$(mpfr_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'

$(BUILDDIR)/$(gmp_ver)/Makefile \
$(BUILDDIR)/$(GMP_VER)/Makefile \
: $(GMP_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@@ -449,7 +480,7 @@ $(BUILDDIR)/$(gmp_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'

$(BUILDDIR)/$(mpc_ver)/Makefile \
$(BUILDDIR)/$(MPC_VER)/Makefile \
: $(MPC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@@ -468,11 +499,11 @@ $(BUILDDIR)/$(mpc_ver)/Makefile \
# Only valid if glibc target -> linux
# proper destructor handling for c++
ifneq (,$(findstring linux,$(TARGET)))
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
$(BUILDDIR)/$(GCC_VER)/Makefile : CONFIG += --enable-__cxa_atexit
endif

ifeq ($(ARCH), armhfp)
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --with-float=hard
$(BUILDDIR)/$(GCC_VER)/Makefile : CONFIG += --with-float=hard
endif

ifneq ($(filter riscv64 ppc64le s390x, $(ARCH)), )
@@ -487,7 +518,7 @@ endif
# skip native language.
# and link and assemble with the binutils we created
# earlier, so --with-gnu*
$(BUILDDIR)/$(gcc_ver)/Makefile \
$(BUILDDIR)/$(GCC_VER)/Makefile \
: $(GCC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
@@ -509,17 +540,17 @@ $(BUILDDIR)/$(gcc_ver)/Makefile \
@echo 'done'

# need binutils for gcc
$(gcc) : $(binutils)
$(GCC) : $(BINUTILS)

# as of 4.3 or so need these for doing config
$(BUILDDIR)/$(gcc_ver)/Makefile : $(gmp) $(mpfr) $(mpc)
$(mpfr) : $(gmp)
$(mpc) : $(gmp) $(mpfr)
$(BUILDDIR)/$(GCC_VER)/Makefile : $(GMP) $(MPFR) $(MPC)
$(MPFR) : $(GMP)
$(MPC) : $(GMP) $(MPFR)

################################################################################
# Build gdb but only where host and target match
ifeq ($(HOST), $(TARGET))
$(BUILDDIR)/$(gdb_ver)/Makefile: $(GDB_CFG)
$(BUILDDIR)/$(GDB_VER)/Makefile: $(GDB_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
( \
@@ -532,9 +563,9 @@ ifeq ($(HOST), $(TARGET))
) > $(@D)/log.config 2>&1
@echo 'done'

$(gdb): $(gcc)
$(GDB): $(GCC)
else
$(BUILDDIR)/$(gdb_ver)/Makefile:
$(BUILDDIR)/$(GDB_VER)/Makefile:
$(info Faking $@, not used when cross-compiling)
mkdir -p $(@D)
echo "install:" > $@
@@ -543,7 +574,7 @@ endif

################################################################################
# very straightforward. just build a ccache. it is only for host.
$(BUILDDIR)/$(ccache_ver)/Makefile \
$(BUILDDIR)/$(CCACHE_VER)/Makefile \
: $(CCACHE_SRC_MARKER)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@@ -554,12 +585,12 @@ $(BUILDDIR)/$(ccache_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'

gccpatch = $(TARGETDIR)/gcc-patched
GCC_PATCHED = $(TARGETDIR)/gcc-patched

################################################################################
# For some reason cpp is not created as a target-compiler
ifeq ($(HOST),$(TARGET))
$(gccpatch) : $(gcc) link_libs
$(GCC_PATCHED) : $(GCC) link_libs
@echo -n 'Creating compiler symlinks...'
@for f in cpp; do \
if [ ! -e $(PREFIX)/bin/$(TARGET)-$$f ]; \
@@ -587,7 +618,7 @@ ifeq ($(HOST),$(TARGET))
done;)
@echo 'done'
else
$(gccpatch) :
$(GCC_PATCHED) :
@echo 'done'
endif
@@ -615,7 +646,7 @@ $(PREFIX)/devkit.info:
echo '# This file describes to configure how to interpret the contents of this' >> $@
echo '# devkit' >> $@
echo '' >> $@
echo 'DEVKIT_NAME="$(gcc_ver) - $(LINUX_VERSION)"' >> $@
echo 'DEVKIT_NAME="$(GCC_VER) - $(LINUX_VERSION)"' >> $@
echo 'DEVKIT_TOOLCHAIN_PATH="$$DEVKIT_ROOT/bin"' >> $@
echo 'DEVKIT_SYSROOT="$$DEVKIT_ROOT/$(TARGET)/sysroot"' >> $@
echo 'DEVKIT_EXTRA_PATH="$$DEVKIT_ROOT/bin"' >> $@
@@ -651,32 +682,32 @@ ifeq ($(TARGET), $(HOST))
@echo 'Creating missing $* soft link'
ln -s $(TARGET)-$* $@

missing-links := $(addprefix $(PREFIX)/bin/, \
addr2line ar as c++ c++filt dwp elfedit g++ gcc gcc-$(gcc_ver_only) gprof ld ld.bfd \
MISSING_LINKS := $(addprefix $(PREFIX)/bin/, \
addr2line ar as c++ c++filt dwp elfedit g++ gcc gcc-$(GCC_VER_ONLY) gprof ld ld.bfd \
ld.gold nm objcopy objdump ranlib readelf size strings strip)
endif

# Add link to work around "plugin needed to handle lto object" (JDK-8344272)
$(PREFIX)/lib/bfd-plugins/liblto_plugin.so: $(PREFIX)/libexec/gcc/$(TARGET)/$(gcc_ver_only)/liblto_plugin.so
$(PREFIX)/lib/bfd-plugins/liblto_plugin.so: $(PREFIX)/libexec/gcc/$(TARGET)/$(GCC_VER_ONLY)/liblto_plugin.so
@echo 'Creating missing $(@F) soft link'
@mkdir -p $(@D)
ln -s $$(realpath -s --relative-to=$(@D) $<) $@

missing-links += $(PREFIX)/lib/bfd-plugins/liblto_plugin.so
MISSING_LINKS += $(PREFIX)/lib/bfd-plugins/liblto_plugin.so

################################################################################

bfdlib : $(bfdlib)
binutils : $(binutils)
rpms : $(rpms)
libs : $(libs)
bfdlib : $(BFDLIB)
binutils : $(BINUTILS)
rpms : $(RPMS)
libs : $(LIBS)
sysroot : rpms libs
gcc : sysroot $(gcc) $(gccpatch)
gdb : $(gdb)
all : binutils gcc bfdlib $(PREFIX)/devkit.info $(missing-links) $(SYSROOT_LINKS) \
gcc : sysroot $(GCC) $(GCC_PATCHED)
gdb : $(GDB)
all : binutils gcc bfdlib $(PREFIX)/devkit.info $(MISSING_LINKS) $(SYSROOT_LINKS) \
$(THESE_MAKEFILES) gdb

# this is only built for host. so separate.
ccache : $(ccache)
ccache : $(CCACHE)

.PHONY : gcc all binutils bfdlib link_libs rpms libs sysroot
@@ -93,7 +93,7 @@ elif test "x$TARGET_PLATFORM" = xlinux_x64; then
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.x86_64.rpm | cpio -d -i
elif test "x$TARGET_PLATFORM" = xlinux_x86; then
M4_VERSION=1.4.13-5
wget http://yum.oracle.com/repo/OracleLinux/OL6/latest/i386/getPackage/m4-$M4_VERSION.el6.i686.rpm
wget https://yum.oracle.com/repo/OracleLinux/OL6/latest/i386/getPackage/m4-$M4_VERSION.el6.i686.rpm
cd $IMAGE_DIR
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.i686.rpm | cpio -d -i
else
@@ -27,7 +27,5 @@

DOCLINT += -Xdoclint:all/protected \
'-Xdoclint/package:java.*,javax.*'
COPY += .js
CLEAN += .properties

################################################################################
@@ -1,39 +0,0 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

################################################################################

include LauncherCommon.gmk

################################################################################
## Build jrunscript
################################################################################

$(eval $(call SetupBuildLauncher, jrunscript, \
MAIN_CLASS := com.sun.tools.script.shell.Main, \
JAVA_ARGS := --add-modules ALL-DEFAULT, \
))

################################################################################
@@ -2568,10 +2568,6 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return FP_REG_mask();
}

bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
Node* u = addp->fast_out(i);

@@ -383,13 +383,6 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::sp_opr;
}

// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr; // Not needed on aarch64
}

bool FrameMap::validate_frame() {
return true;
}
@@ -228,8 +228,7 @@ bool frame::safe_for_sender(JavaThread *thread) {

nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@@ -454,48 +453,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
return thread_addr;
}

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;

// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;

address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif

//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
// On aarch64, sites calling method handle intrinsics and lambda forms are treated
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.

if (_cb != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}
}
#endif

//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
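The block removed above was the aarch64 assertion-only check that, when returning into a deoptimization entry point, the original PC recorded in the frame still lies inside the nmethod's code. The general pattern, recovering a stashed original PC before asking the code blob any position-dependent question, is sketched below; the struct layout and helper names are illustrative assumptions, not HotSpot's.

// Illustrative sketch: when a frame's visible return pc is the deopt handler,
// the "real" pc was saved aside in the frame and must be used for any lookup
// that depends on code positions (debug info, oop maps, verification).
#include <cassert>
#include <cstdint>

struct CodeRange { uintptr_t begin; uintptr_t end; };  // [begin, end] inclusive

struct Frame {
  uintptr_t pc;            // pc as found on the stack
  uintptr_t original_pc;   // saved pc, valid only if pc == deopt_entry
};

bool contains_inclusive(const CodeRange& code, uintptr_t pc) {
  return pc >= code.begin && pc <= code.end;
}

uintptr_t pc_for_lookup(const Frame& f, uintptr_t deopt_entry, const CodeRange& code) {
  if (f.pc == deopt_entry) {
    // Deoptimized frame: use the saved original pc, and check it is still
    // inside (or immediately following) the compiled method's code section.
    assert(contains_inclusive(code, f.original_pc) && "original pc outside code");
    return f.original_pc;
  }
  return f.pc;
}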
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -141,8 +141,6 @@
|
||||
int _offset_unextended_sp; // for use in stack-chunk frames
|
||||
};
|
||||
|
||||
void adjust_unextended_sp() NOT_DEBUG_RETURN;
|
||||
|
||||
// true means _sp value is correct and we can use it to get the sender's sp
|
||||
// of the compiled frame, otherwise, _sp value may be invalid and we can use
|
||||
// _fp to get the sender's sp if PreserveFramePointer is enabled.
|
||||
@ -152,11 +150,6 @@
|
||||
return (intptr_t*) addr_at(offset);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// Used in frame::sender_for_{interpreter,compiled}_frame
|
||||
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Constructors
|
||||
|
||||
|
||||
@ -116,8 +116,6 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
}
|
||||
|
||||
inline void frame::setup(address pc) {
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
_pc = original_pc;
|
||||
@ -223,7 +221,6 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
|
||||
// assert(_pc != nullptr, "no pc?");
|
||||
|
||||
_cb = CodeCache::find_blob(_pc);
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
|
||||
@ -35,8 +35,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
|
||||
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
|
||||
#define SUPPORT_MONITOR_COUNT
|
||||
|
||||
// Aarch64 was not originally defined to be multi-copy-atomic, but now
|
||||
// is. See: "Simplifying ARM Concurrency: Multicopy-atomic Axiomatic
|
||||
// and Operational Models for ARMv8"
|
||||
|
||||
@ -39,24 +39,22 @@ public:
|
||||
// 3 - restoring an old state (javaCalls)
|
||||
|
||||
void clear(void) {
|
||||
// No hardware barriers are necessary. All members are volatile and the profiler
|
||||
// is run from a signal handler and only observers the thread its running on.
|
||||
|
||||
// clearing _last_Java_sp must be first
|
||||
_last_Java_sp = nullptr;
|
||||
OrderAccess::release();
|
||||
_last_Java_fp = nullptr;
|
||||
_last_Java_pc = nullptr;
|
||||
}
|
||||
|
||||
void copy(JavaFrameAnchor* src) {
|
||||
// In order to make sure the transition state is valid for "this"
|
||||
// No hardware barriers are necessary. All members are volatile and the profiler
|
||||
// is run from a signal handler and only observers the thread its running on.
|
||||
|
||||
// We must clear _last_Java_sp before copying the rest of the new data
|
||||
//
|
||||
// Hack Alert: Temporary bugfix for 4717480/4721647
|
||||
// To act like previous version (pd_cache_state) don't null _last_Java_sp
|
||||
// unless the value is changing
|
||||
//
|
||||
if (_last_Java_sp != src->_last_Java_sp) {
|
||||
_last_Java_sp = nullptr;
|
||||
OrderAccess::release();
|
||||
}
|
||||
_last_Java_fp = src->_last_Java_fp;
|
||||
_last_Java_pc = src->_last_Java_pc;
|
||||
|
||||
@@ -634,12 +634,13 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
last_java_sp = esp;
}

str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

// last_java_fp is optional
if (last_java_fp->is_valid()) {
str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
}

// We must set sp last.
str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
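The hunk above moves the store of last_Java_sp after the store of last_Java_fp: a stack walker such as the signal-based profiler treats a non-null last_Java_sp as the signal that the whole anchor is valid, so the sp must become visible only once fp (and pc) are already written. A minimal sketch of that publish-then-read pattern follows; the Anchor type and the explicit release/acquire placement are illustrative assumptions, not the exact HotSpot code.

// Illustrative sketch only: publish an anchor so that a concurrent reader that
// observes a non-null sp is guaranteed to also observe the matching fp and pc.
#include <atomic>
#include <cstdint>

struct Anchor {
  std::atomic<intptr_t*> sp{nullptr};  // written last, read first
  intptr_t* fp = nullptr;
  uintptr_t pc = 0;
};

void publish(Anchor& a, intptr_t* sp, intptr_t* fp, uintptr_t pc) {
  a.fp = fp;                                   // fill in the other fields first
  a.pc = pc;
  a.sp.store(sp, std::memory_order_release);   // "we must set sp last"
}

bool read(const Anchor& a, intptr_t*& fp_out, uintptr_t& pc_out) {
  intptr_t* sp = a.sp.load(std::memory_order_acquire);
  if (sp == nullptr) return false;             // anchor not yet valid
  fp_out = a.fp;                               // safe: ordered after the sp load
  pc_out = a.pc;
  return true;
}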
@ -5630,38 +5631,6 @@ void MacroAssembler::tlab_allocate(Register obj,
|
||||
bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
|
||||
}
|
||||
|
||||
void MacroAssembler::inc_held_monitor_count(Register tmp) {
|
||||
Address dst(rthread, JavaThread::held_monitor_count_offset());
|
||||
#ifdef ASSERT
|
||||
ldr(tmp, dst);
|
||||
increment(tmp);
|
||||
str(tmp, dst);
|
||||
Label ok;
|
||||
tbz(tmp, 63, ok);
|
||||
STOP("assert(held monitor count underflow)");
|
||||
should_not_reach_here();
|
||||
bind(ok);
|
||||
#else
|
||||
increment(dst);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::dec_held_monitor_count(Register tmp) {
|
||||
Address dst(rthread, JavaThread::held_monitor_count_offset());
|
||||
#ifdef ASSERT
|
||||
ldr(tmp, dst);
|
||||
decrement(tmp);
|
||||
str(tmp, dst);
|
||||
Label ok;
|
||||
tbz(tmp, 63, ok);
|
||||
STOP("assert(held monitor count underflow)");
|
||||
should_not_reach_here();
|
||||
bind(ok);
|
||||
#else
|
||||
decrement(dst);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::verify_tlab() {
|
||||
#ifdef ASSERT
|
||||
if (UseTLAB && VerifyOops) {
|
||||
|
||||
@ -983,9 +983,6 @@ public:
|
||||
void push_cont_fastpath(Register java_thread = rthread);
|
||||
void pop_cont_fastpath(Register java_thread = rthread);
|
||||
|
||||
void inc_held_monitor_count(Register tmp);
|
||||
void dec_held_monitor_count(Register tmp);
|
||||
|
||||
// Round up to a power of two
|
||||
void round_to(Register reg, int modulus);
|
||||
|
||||
|
||||
@ -90,7 +90,6 @@ void Relocation::pd_set_call_destination(address x) {
|
||||
|
||||
void trampoline_stub_Relocation::pd_fix_owner_after_move() {
|
||||
NativeCall* call = nativeCall_at(owner());
|
||||
assert(call->raw_destination() == owner(), "destination should be empty");
|
||||
address trampoline = addr();
|
||||
address dest = nativeCallTrampolineStub_at(trampoline)->destination();
|
||||
if (!Assembler::reachable_from_branch_at(owner(), dest)) {
|
||||
|
||||
@ -985,11 +985,8 @@ static void fill_continuation_entry(MacroAssembler* masm) {
|
||||
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
|
||||
__ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
|
||||
__ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
|
||||
|
||||
__ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
|
||||
__ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
|
||||
}
|
||||
|
||||
// on entry, sp points to the ContinuationEntry
|
||||
@ -1005,50 +1002,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
#endif
|
||||
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
|
||||
__ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
|
||||
|
||||
if (CheckJNICalls) {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
|
||||
__ cbzw(rscratch1, L_skip_vthread_code);
|
||||
|
||||
// If the held monitor count is > 0 and this vthread is terminating then
|
||||
// it failed to release a JNI monitor. So we issue the same log message
|
||||
// that JavaThread::exit does.
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
|
||||
__ cbz(rscratch1, L_skip_vthread_code);
|
||||
|
||||
// Save return value potentially containing the exception oop in callee-saved R19.
|
||||
__ mov(r19, r0);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
|
||||
// Restore potential return value.
|
||||
__ mov(r0, r19);
|
||||
|
||||
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
|
||||
// on termination. The held count is implicitly zeroed below when we restore from
|
||||
// the parent held count (which has to be zero).
|
||||
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
else {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
|
||||
__ cbzw(rscratch1, L_skip_vthread_code);
|
||||
|
||||
// See comment just above. If not checking JNI calls the JNI count is only
|
||||
// needed for assertion checking.
|
||||
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
|
||||
__ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
|
||||
|
||||
__ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
|
||||
__ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
|
||||
__ add(rfp, sp, (int)ContinuationEntry::size());
|
||||
|
||||
@@ -1,6 +1,7 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -222,10 +223,13 @@ void VM_Version::initialize() {
// Neoverse
// N1: 0xd0c
// N2: 0xd49
// N3: 0xd8e
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
model_is(0xd40) || model_is(0xd4f))) {
model_is(0xd40) || model_is(0xd4f) ||
model_is(0xd8e) || model_is(0xd84))) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@@ -260,7 +264,9 @@ void VM_Version::initialize() {
// Neoverse
// V1: 0xd40
// V2: 0xd4f
if (_cpu == CPU_ARM && (model_is(0xd40) || model_is(0xd4f))) {
// V3: 0xd84
if (_cpu == CPU_ARM &&
(model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}
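These hunks extend the Neoverse allow-lists with the N3 (0xd8e) and V3 (0xd84) part numbers so those cores get UseSIMDForMemoryOps and UseCryptoPmullForCRC32 enabled by default. The value being compared is the PartNum field of the Arm MIDR register; a small sketch of that kind of check is below, where the helper names and the sample MIDR value are assumptions for illustration.

// Illustrative sketch: match an Arm CPU by implementer and part number, the way
// a model_is()-style predicate works. MIDR layout: implementer in bits [31:24],
// part number in bits [15:4].
#include <cstdint>
#include <cstdio>

constexpr uint32_t implementer(uint64_t midr) { return (midr >> 24) & 0xff; }
constexpr uint32_t part_num(uint64_t midr)    { return (midr >> 4) & 0xfff; }

bool is_supported_neoverse(uint64_t midr) {
  if (implementer(midr) != 0x41) return false;  // 0x41 == Arm Ltd.
  switch (part_num(midr)) {
    case 0xd40:  // Neoverse V1
    case 0xd4f:  // Neoverse V2
    case 0xd84:  // Neoverse V3
    case 0xd8e:  // Neoverse N3
      return true;
    default:
      return false;
  }
}

int main() {
  uint64_t midr = 0x410fd840;  // hypothetical V3 MIDR value
  std::printf("match: %d\n", is_supported_neoverse(midr));
}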
@ -1154,10 +1154,6 @@ RegMask Matcher::modL_proj_mask() {
|
||||
return RegMask();
|
||||
}
|
||||
|
||||
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
|
||||
return FP_REGP_mask();
|
||||
}
|
||||
|
||||
bool maybe_far_call(const CallNode *n) {
|
||||
return !MacroAssembler::_reachable_from_cache(n->as_Call()->entry_point());
|
||||
}
|
||||
@ -1248,23 +1244,6 @@ encode %{
|
||||
__ set_inst_mark(mark);
|
||||
%}
|
||||
|
||||
enc_class preserve_SP %{
|
||||
// preserve mark
|
||||
address mark = __ inst_mark();
|
||||
DEBUG_ONLY(int off0 = __ offset());
|
||||
// FP is preserved across all calls, even compiled calls.
|
||||
// Use it to preserve SP in places where the callee might change the SP.
|
||||
__ mov(Rmh_SP_save, SP);
|
||||
DEBUG_ONLY(int off1 = __ offset());
|
||||
assert(off1 - off0 == 4, "correct size prediction");
|
||||
// restore mark
|
||||
__ set_inst_mark(mark);
|
||||
%}
|
||||
|
||||
enc_class restore_SP %{
|
||||
__ mov(SP, Rmh_SP_save);
|
||||
%}
|
||||
|
||||
enc_class Java_Dynamic_Call (method meth) %{
|
||||
Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
|
||||
assert(R8_ic_reg == Ricklass, "should be");
|
||||
@ -8799,7 +8778,6 @@ instruct safePoint_poll(iRegP poll, R12RegI tmp, flagsReg icc) %{
|
||||
// Call Java Static Instruction
|
||||
instruct CallStaticJavaDirect( method meth ) %{
|
||||
match(CallStaticJava);
|
||||
predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
|
||||
effect(USE meth);
|
||||
|
||||
ins_cost(CALL_COST);
|
||||
@ -8808,20 +8786,6 @@ instruct CallStaticJavaDirect( method meth ) %{
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
// Call Java Static Instruction (method handle version)
|
||||
instruct CallStaticJavaHandle( method meth ) %{
|
||||
match(CallStaticJava);
|
||||
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
|
||||
effect(USE meth);
|
||||
// FP is saved by all callees (for interpreter stack correction).
|
||||
// We use it here for a similar purpose, in {preserve,restore}_FP.
|
||||
|
||||
ins_cost(CALL_COST);
|
||||
format %{ "CALL,static/MethodHandle ==> " %}
|
||||
ins_encode( SetInstMark, preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog, ClearInstMark );
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
// Call Java Dynamic Instruction
|
||||
instruct CallDynamicJavaDirect( method meth ) %{
|
||||
match(CallDynamicJava);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
//
|
||||
// Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@ -432,8 +432,7 @@ OptoRegPair c2::return_value(int ideal_reg) {
|
||||
|
||||
int MachCallStaticJavaNode::ret_addr_offset() {
|
||||
bool far = (_method == nullptr) ? maybe_far_call(this) : !cache_reachable();
|
||||
return ((far ? 3 : 1) + (_method_handle_invoke ? 1 : 0)) *
|
||||
NativeInstruction::instruction_size;
|
||||
return (far ? 3 : 1) * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
int MachCallDynamicJavaNode::ret_addr_offset() {
|
||||
|
||||
@ -174,11 +174,6 @@ LIR_Opr FrameMap::stack_pointer() {
|
||||
return FrameMap::SP_opr;
|
||||
}
|
||||
|
||||
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
|
||||
assert(Rmh_SP_save == FP, "Fix register used for saving SP for MethodHandle calls");
|
||||
return FP_opr;
|
||||
}
|
||||
|
||||
bool FrameMap::validate_frame() {
|
||||
int max_offset = in_bytes(framesize_in_bytes());
|
||||
int java_index = 0;
|
||||
|
||||
@ -275,14 +275,6 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
|
||||
}
|
||||
|
||||
|
||||
static void restore_sp_for_method_handle(StubAssembler* sasm) {
|
||||
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
|
||||
__ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
|
||||
__ cmp(Rtemp, 0);
|
||||
__ mov(SP, Rmh_SP_save, ne);
|
||||
}
|
||||
|
||||
|
||||
OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) {
|
||||
__ block_comment("generate_handle_exception");
|
||||
|
||||
@ -339,7 +331,6 @@ OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) {
|
||||
break;
|
||||
case StubId::c1_handle_exception_from_callee_id:
|
||||
restore_live_registers_without_return(sasm); // must not jump immediately to handler
|
||||
restore_sp_for_method_handle(sasm);
|
||||
__ ret();
|
||||
break;
|
||||
default: ShouldNotReachHere();
|
||||
@ -372,9 +363,6 @@ void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
|
||||
// Jump to handler
|
||||
__ verify_not_null_oop(Rexception_obj);
|
||||
|
||||
// JSR292 extension
|
||||
restore_sp_for_method_handle(sasm);
|
||||
|
||||
__ jump(R0);
|
||||
}
|
||||
|
||||
|
||||
@ -329,56 +329,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::verify_deopt_original_pc
|
||||
//
|
||||
// Verifies the calculated original PC of a deoptimization PC for the
|
||||
// given unextended SP. The unextended SP might also be the saved SP
|
||||
// for MethodHandle call sites.
|
||||
#ifdef ASSERT
|
||||
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
|
||||
frame fr;
|
||||
|
||||
// This is ugly but it's better than to change {get,set}_original_pc
|
||||
// to take an SP value as argument. And it's only a debugging
|
||||
// method anyway.
|
||||
fr._unextended_sp = unextended_sp;
|
||||
|
||||
address original_pc = nm->get_original_pc(&fr);
|
||||
assert(nm->insts_contains_inclusive(original_pc),
|
||||
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
|
||||
assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
|
||||
}
|
||||
#endif
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::adjust_unextended_sp
|
||||
void frame::adjust_unextended_sp() {
|
||||
// same as on x86
|
||||
|
||||
// If we are returning to a compiled MethodHandle call site, the
|
||||
// saved_fp will in fact be a saved value of the unextended SP. The
|
||||
// simplest way to tell whether we are returning to such a call site
|
||||
// is as follows:
|
||||
|
||||
nmethod* sender_nm = (_cb == nullptr) ? nullptr : _cb->as_nmethod_or_null();
|
||||
if (sender_nm != nullptr) {
|
||||
// If the sender PC is a deoptimization point, get the original
|
||||
// PC. For MethodHandle call site the unextended_sp is stored in
|
||||
// saved_fp.
|
||||
if (sender_nm->is_deopt_mh_entry(_pc)) {
|
||||
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
|
||||
_unextended_sp = _fp;
|
||||
}
|
||||
else if (sender_nm->is_deopt_entry(_pc)) {
|
||||
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
|
||||
}
|
||||
else if (sender_nm->is_method_handle_return(_pc)) {
|
||||
_unextended_sp = _fp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::update_map_with_saved_link
|
||||
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -85,20 +85,11 @@
|
||||
// original sp.
|
||||
|
||||
intptr_t* _unextended_sp;
|
||||
void adjust_unextended_sp();
|
||||
|
||||
intptr_t* ptr_at_addr(int offset) const {
|
||||
return (intptr_t*) addr_at(offset);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// Used in frame::sender_for_{interpreter,compiled}_frame
|
||||
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
|
||||
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
|
||||
verify_deopt_original_pc(nm, unextended_sp, true);
|
||||
}
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Constructors
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -112,8 +112,6 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
|
||||
}
|
||||
|
||||
inline void frame::setup(address pc) {
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
_pc = original_pc;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -364,7 +364,6 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
// This does not seem to conflict with Rexception_pc
|
||||
// In case of issues, R3 might be OK but adapters calling the runtime would have to save it
|
||||
#define R5_mh R5 // MethodHandle register, used during the call setup
|
||||
#define Rmh_SP_save FP // for C1
|
||||
|
||||
/*
|
||||
* C++ Interpreter Register Defines
|
||||
|
||||
@ -264,11 +264,6 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
|
||||
|
||||
__ raw_pop(FP, LR);
|
||||
|
||||
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
|
||||
__ ldr(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
|
||||
__ cmp(Rtemp, 0);
|
||||
__ mov(SP, Rmh_SP_save, ne);
|
||||
|
||||
// R0 contains handler address
|
||||
// Since this may be the deopt blob we must set R5 to look like we returned
|
||||
// from the original pc that threw the exception
|
||||
|
||||
@ -374,15 +374,6 @@ LIR_Opr FrameMap::stack_pointer() {
|
||||
return SP_opr;
|
||||
}
|
||||
|
||||
|
||||
// JSR 292
|
||||
// On PPC64, there is no need to save the SP, because neither
|
||||
// method handle intrinsics, nor compiled lambda forms modify it.
|
||||
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
|
||||
return LIR_OprFact::illegalOpr;
|
||||
}
|
||||
|
||||
|
||||
bool FrameMap::validate_frame() {
|
||||
int max_offset = in_bytes(framesize_in_bytes());
|
||||
int java_index = 0;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -43,8 +43,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
|
||||
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
|
||||
#define SUPPORT_MONITOR_COUNT
|
||||
|
||||
// PPC64 is not specified as multi-copy-atomic
|
||||
// So we must not #define CPU_MULTI_COPY_ATOMIC
|
||||
|
||||
|
||||
@ -402,7 +402,7 @@ void NativePostCallNop::make_deopt() {
|
||||
bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
|
||||
int32_t i2, i1;
|
||||
assert(is_aligned(cb_offset, 4), "cb offset alignment does not match instruction alignment");
|
||||
assert(!decode(i1, i2), "already patched");
|
||||
assert(!decode(i1, i2) || NMethodRelocation, "already patched");
|
||||
|
||||
cb_offset = cb_offset >> 2;
|
||||
if (((oopmap_slot & ppc_oopmap_slot_mask) != oopmap_slot) || ((cb_offset & ppc_cb_offset_mask) != cb_offset)) {
|
||||
|
||||
@ -2473,10 +2473,6 @@ RegMask Matcher::modL_proj_mask() {
|
||||
return RegMask();
|
||||
}
|
||||
|
||||
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
|
||||
return RegMask();
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
//----------ENCODING BLOCK-----------------------------------------------------
|
||||
@ -3434,7 +3430,6 @@ encode %{
|
||||
|
||||
// Create the call node.
|
||||
CallDynamicJavaDirectSchedNode *call = new CallDynamicJavaDirectSchedNode();
|
||||
call->_method_handle_invoke = _method_handle_invoke;
|
||||
call->_vtable_index = _vtable_index;
|
||||
call->_method = _method;
|
||||
call->_optimized_virtual = _optimized_virtual;
|
||||
|
||||
@ -1639,7 +1639,6 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
|
||||
assert_different_registers(reg_cont_obj, reg_flags);
|
||||
Register zero = R8_ARG6;
|
||||
Register tmp2 = R9_ARG7;
|
||||
Register tmp3 = R10_ARG8;
|
||||
|
||||
DEBUG_ONLY(__ block_comment("fill {"));
|
||||
#ifdef ASSERT
|
||||
@ -1655,12 +1654,9 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
|
||||
__ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);
|
||||
|
||||
__ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
|
||||
__ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
__ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
|
||||
__ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
|
||||
|
||||
__ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
|
||||
__ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
DEBUG_ONLY(__ block_comment("} fill"));
|
||||
}
|
||||
|
||||
@ -1681,7 +1677,6 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
|
||||
static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
Register tmp1 = R8_ARG6;
|
||||
Register tmp2 = R9_ARG7;
|
||||
Register tmp3 = R10_ARG8;
|
||||
|
||||
#ifdef ASSERT
|
||||
__ block_comment("clean {");
|
||||
@ -1692,57 +1687,8 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
|
||||
__ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
|
||||
__ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
|
||||
|
||||
if (CheckJNICalls) {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
|
||||
// If the held monitor count is > 0 and this vthread is terminating then
|
||||
// it failed to release a JNI monitor. So we issue the same log message
|
||||
// that JavaThread::exit does.
|
||||
__ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
|
||||
// Save return value potentially containing the exception oop
|
||||
Register ex_oop = R15_esp; // nonvolatile register
|
||||
__ mr(ex_oop, R3_RET);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
|
||||
// Restore potential return value
|
||||
__ mr(R3_RET, ex_oop);
|
||||
|
||||
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
|
||||
// on termination. The held count is implicitly zeroed below when we restore from
|
||||
// the parent held count (which has to be zero).
|
||||
__ li(tmp1, 0);
|
||||
__ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
else {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
|
||||
// See comment just above. If not checking JNI calls the JNI count is only
|
||||
// needed for assertion checking.
|
||||
__ li(tmp1, 0);
|
||||
__ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
|
||||
__ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP);
|
||||
__ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
__ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread);
|
||||
__ ld_ptr(tmp2, ContinuationEntry::parent_offset(), R1_SP);
|
||||
__ st_ptr(tmp2, JavaThread::cont_entry_offset(), R16_thread);
|
||||
DEBUG_ONLY(__ block_comment("} clean"));
|
||||
}
|
||||
|
||||
|
||||
@ -377,11 +377,6 @@ LIR_Opr FrameMap::stack_pointer() {
|
||||
return FrameMap::sp_opr;
|
||||
}
|
||||
|
||||
// JSR 292
|
||||
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
|
||||
return LIR_OprFact::illegalOpr; // Not needed on riscv
|
||||
}
|
||||
|
||||
bool FrameMap::validate_frame() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1687,6 +1687,7 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
Register tmp4, Register tmp5, Register tmp6,
BasicType eltype)
{
assert(!UseRVV, "sanity");
assert_different_registers(ary, cnt, result, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, t0, t1);

const int elsize = arrays_hashcode_elsize(eltype);
@ -1759,29 +1760,143 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
BLOCK_COMMENT("} // arrays_hashcode");
}

void C2_MacroAssembler::arrays_hashcode_v(Register ary, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3,
BasicType eltype)
{
assert(UseRVV, "sanity");
assert(StubRoutines::riscv::arrays_hashcode_powers_of_31() != nullptr, "sanity");
assert_different_registers(ary, cnt, result, tmp1, tmp2, tmp3, t0, t1);

// The MaxVectorSize should have been set by detecting RVV max vector register
// size when check UseRVV (i.e. MaxVectorSize == VM_Version::_initial_vector_length).
// Let's use T_INT as all hashCode calculations eventually deal with ints.
const int lmul = 2;
const int stride = MaxVectorSize / sizeof(jint) * lmul;

const int elsize_bytes = arrays_hashcode_elsize(eltype);
const int elsize_shift = exact_log2(elsize_bytes);

switch (eltype) {
case T_BOOLEAN: BLOCK_COMMENT("arrays_hashcode_v(unsigned byte) {"); break;
case T_CHAR: BLOCK_COMMENT("arrays_hashcode_v(char) {"); break;
case T_BYTE: BLOCK_COMMENT("arrays_hashcode_v(byte) {"); break;
case T_SHORT: BLOCK_COMMENT("arrays_hashcode_v(short) {"); break;
case T_INT: BLOCK_COMMENT("arrays_hashcode_v(int) {"); break;
default:
ShouldNotReachHere();
}

const Register pow31_highest = tmp1;
const Register ary_end = tmp2;
const Register consumed = tmp3;

const VectorRegister v_sum = v2;
const VectorRegister v_src = v4;
const VectorRegister v_coeffs = v6;
const VectorRegister v_tmp = v8;

const address adr_pows31 = StubRoutines::riscv::arrays_hashcode_powers_of_31()
+ sizeof(jint);
Label VEC_LOOP, DONE, SCALAR_TAIL, SCALAR_TAIL_LOOP;

// NB: at this point (a) 'result' already has some value,
// (b) 'cnt' is not 0 or 1, see java code for details.

andi(t0, cnt, ~(stride - 1));
beqz(t0, SCALAR_TAIL);

la(t1, ExternalAddress(adr_pows31));
lw(pow31_highest, Address(t1, -1 * sizeof(jint)));

vsetvli(consumed, cnt, Assembler::e32, Assembler::m2);
vle32_v(v_coeffs, t1); // 31^^(stride - 1) ... 31^^0
vmv_v_x(v_sum, x0);

bind(VEC_LOOP);
arrays_hashcode_elload_v(v_src, v_tmp, ary, eltype);
vmul_vv(v_src, v_src, v_coeffs);
vmadd_vx(v_sum, pow31_highest, v_src);
mulw(result, result, pow31_highest);
shadd(ary, consumed, ary, t0, elsize_shift);
subw(cnt, cnt, consumed);
andi(t1, cnt, ~(stride - 1));
bnez(t1, VEC_LOOP);

vmv_s_x(v_tmp, x0);
vredsum_vs(v_sum, v_sum, v_tmp);
vmv_x_s(t0, v_sum);
addw(result, result, t0);
beqz(cnt, DONE);

bind(SCALAR_TAIL);
shadd(ary_end, cnt, ary, t0, elsize_shift);

bind(SCALAR_TAIL_LOOP);
arrays_hashcode_elload(t0, Address(ary), eltype);
slli(t1, result, 5); // optimize 31 * result
subw(result, t1, result); // with result<<5 - result
addw(result, result, t0);
addi(ary, ary, elsize_bytes);
bne(ary, ary_end, SCALAR_TAIL_LOOP);

bind(DONE);
BLOCK_COMMENT("} // arrays_hashcode_v");
}

int C2_MacroAssembler::arrays_hashcode_elsize(BasicType eltype) {
switch (eltype) {
case T_BOOLEAN: return sizeof(jboolean);
case T_BYTE: return sizeof(jbyte);
case T_SHORT: return sizeof(jshort);
case T_CHAR: return sizeof(jchar);
case T_INT: return sizeof(jint);
default:
ShouldNotReachHere();
return -1;
}
}

void C2_MacroAssembler::arrays_hashcode_elload(Register dst, Address src, BasicType eltype) {
switch (eltype) {
// T_BOOLEAN used as surrogate for unsigned byte
case T_BOOLEAN: lbu(dst, src); break;
case T_BYTE: lb(dst, src); break;
case T_SHORT: lh(dst, src); break;
case T_CHAR: lhu(dst, src); break;
case T_INT: lw(dst, src); break;
default:
ShouldNotReachHere();
}
}

void C2_MacroAssembler::arrays_hashcode_elload_v(VectorRegister vdst,
VectorRegister vtmp,
Register src,
BasicType eltype) {
assert_different_registers(vdst, vtmp);
switch (eltype) {
case T_BOOLEAN:
vle8_v(vtmp, src);
vzext_vf4(vdst, vtmp);
break;
case T_BYTE:
vle8_v(vtmp, src);
vsext_vf4(vdst, vtmp);
break;
case T_CHAR:
vle16_v(vtmp, src);
vzext_vf2(vdst, vtmp);
break;
case T_SHORT:
vle16_v(vtmp, src);
vsext_vf2(vdst, vtmp);
break;
case T_INT:
vle32_v(vdst, src);
break;
default:
ShouldNotReachHere();
}
}

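Editor's note on the intrinsic above: the RVV loop and its scalar tail are expected to produce Java's standard polynomial array hash, h = h*31^n + a[0]*31^(n-1) + ... + a[n-1], with the vector path consuming one stride of elements per iteration (result is scaled by 31^stride via pow31_highest while v_sum accumulates the coefficient-weighted lanes, which vredsum_vs folds in at the end). A minimal scalar sketch of that target result, added here for reference only and not part of the patch:

// Editor's sketch: the hash value the vectorized path must reproduce.
#include <cstdint>
#include <cstddef>

static int32_t scalar_array_hashcode(int32_t initial, const int32_t* a, size_t n) {
  int32_t h = initial;           // 'result' already holds a value on entry
  for (size_t i = 0; i < n; i++) {
    h = 31 * h + a[i];           // same update the SCALAR_TAIL_LOOP performs
  }
  return h;
}

The element type only changes how lanes are widened to 32 bits (arrays_hashcode_elload_v above); the arithmetic itself is always done on jints.
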
@ -92,11 +92,15 @@
Register tmp3, Register tmp4,
Register tmp5, Register tmp6,
BasicType eltype);

// helper function for arrays_hashcode
int arrays_hashcode_elsize(BasicType eltype);
void arrays_hashcode_elload(Register dst, Address src, BasicType eltype);

void arrays_hashcode_v(Register ary, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3,
BasicType eltype);
void arrays_hashcode_elload_v(VectorRegister vdst, VectorRegister vtmp,
Register src, BasicType eltype);

void string_equals(Register r1, Register r2,
Register result, Register cnt1);

@ -217,8 +217,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
|
||||
nmethod* nm = sender_blob->as_nmethod_or_null();
|
||||
if (nm != nullptr) {
|
||||
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
|
||||
nm->method()->is_method_handle_intrinsic()) {
|
||||
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -427,49 +426,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
|
||||
return thread_addr;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::verify_deopt_original_pc
|
||||
//
|
||||
// Verifies the calculated original PC of a deoptimization PC for the
|
||||
// given unextended SP.
|
||||
#ifdef ASSERT
|
||||
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
|
||||
frame fr;
|
||||
|
||||
// This is ugly but it's better than to change {get,set}_original_pc
|
||||
// to take an SP value as argument. And it's only a debugging
|
||||
// method anyway.
|
||||
fr._unextended_sp = unextended_sp;
|
||||
|
||||
assert_cond(nm != nullptr);
|
||||
address original_pc = nm->get_original_pc(&fr);
|
||||
assert(nm->insts_contains_inclusive(original_pc),
|
||||
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
|
||||
}
|
||||
#endif
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::adjust_unextended_sp
|
||||
#ifdef ASSERT
|
||||
void frame::adjust_unextended_sp() {
|
||||
// On riscv, sites calling method handle intrinsics and lambda forms are treated
|
||||
// as any other call site. Therefore, no special action is needed when we are
|
||||
// returning to any of these call sites.
|
||||
|
||||
if (_cb != nullptr) {
|
||||
nmethod* sender_nm = _cb->as_nmethod_or_null();
|
||||
if (sender_nm != nullptr) {
|
||||
// If the sender PC is a deoptimization point, get the original PC.
|
||||
if (sender_nm->is_deopt_entry(_pc) ||
|
||||
sender_nm->is_deopt_mh_entry(_pc)) {
|
||||
verify_deopt_original_pc(sender_nm, _unextended_sp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::sender_for_interpreter_frame
|
||||
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -179,17 +179,10 @@
|
||||
int _offset_unextended_sp; // for use in stack-chunk frames
|
||||
};
|
||||
|
||||
void adjust_unextended_sp() NOT_DEBUG_RETURN;
|
||||
|
||||
intptr_t* ptr_at_addr(int offset) const {
|
||||
return (intptr_t*) addr_at(offset);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// Used in frame::sender_for_{interpreter,compiled}_frame
|
||||
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Constructors
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@ -114,8 +114,6 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
|
||||
}
|
||||
|
||||
inline void frame::setup(address pc) {
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
_pc = original_pc;
|
||||
@ -215,7 +213,6 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
|
||||
// value.
|
||||
|
||||
_cb = CodeCache::find_blob(_pc);
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@ -44,8 +44,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
|
||||
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
|
||||
#define SUPPORT_MONITOR_COUNT
|
||||
|
||||
#define SUPPORT_RESERVED_STACK_AREA
|
||||
|
||||
#define USE_POINTERS_TO_REGISTER_IMPL_ARRAY
|
||||
|
||||
@ -39,25 +39,23 @@ public:
|
||||
// 3 - restoring an old state (javaCalls)
|
||||
|
||||
void clear(void) {
|
||||
// No hardware barriers are necessary. All members are volatile and the profiler
|
||||
// is run from a signal handler and the only observer is the thread its running on.
|
||||
|
||||
// clearing _last_Java_sp must be first
|
||||
_last_Java_sp = nullptr;
|
||||
OrderAccess::release();
|
||||
_last_Java_fp = nullptr;
|
||||
_last_Java_pc = nullptr;
|
||||
}
|
||||
|
||||
void copy(JavaFrameAnchor* src) {
|
||||
// In order to make sure the transition state is valid for "this"
|
||||
// No hardware barriers are necessary. All members are volatile and the profiler
|
||||
// is run from a signal handler and the only observer is the thread its running on.
|
||||
|
||||
// We must clear _last_Java_sp before copying the rest of the new data
|
||||
//
|
||||
// Hack Alert: Temporary bugfix for 4717480/4721647
|
||||
// To act like previous version (pd_cache_state) don't null _last_Java_sp
|
||||
// unless the value is changing
|
||||
//
|
||||
assert(src != nullptr, "Src should not be null.");
|
||||
if (_last_Java_sp != src->_last_Java_sp) {
|
||||
_last_Java_sp = nullptr;
|
||||
OrderAccess::release();
|
||||
}
|
||||
_last_Java_fp = src->_last_Java_fp;
|
||||
_last_Java_pc = src->_last_Java_pc;
|
||||
|
||||
@ -225,36 +225,6 @@ void MacroAssembler::pop_cont_fastpath(Register java_thread) {
|
||||
bind(done);
|
||||
}
|
||||
|
||||
void MacroAssembler::inc_held_monitor_count(Register tmp) {
|
||||
Address dst(xthread, JavaThread::held_monitor_count_offset());
|
||||
ld(tmp, dst);
|
||||
addi(tmp, tmp, 1);
|
||||
sd(tmp, dst);
|
||||
#ifdef ASSERT
|
||||
Label ok;
|
||||
test_bit(tmp, tmp, 63);
|
||||
beqz(tmp, ok);
|
||||
STOP("assert(held monitor count overflow)");
|
||||
should_not_reach_here();
|
||||
bind(ok);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::dec_held_monitor_count(Register tmp) {
|
||||
Address dst(xthread, JavaThread::held_monitor_count_offset());
|
||||
ld(tmp, dst);
|
||||
subi(tmp, tmp, 1);
|
||||
sd(tmp, dst);
|
||||
#ifdef ASSERT
|
||||
Label ok;
|
||||
test_bit(tmp, tmp, 63);
|
||||
beqz(tmp, ok);
|
||||
STOP("assert(held monitor count underflow)");
|
||||
should_not_reach_here();
|
||||
bind(ok);
|
||||
#endif
|
||||
}
|
||||
|
||||
int MacroAssembler::align(int modulus, int extra_offset) {
|
||||
CompressibleScope scope(this);
|
||||
intptr_t before = offset();
|
||||
@ -390,12 +360,14 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
|
||||
last_java_sp = esp;
|
||||
}
|
||||
|
||||
sd(last_java_sp, Address(xthread, JavaThread::last_Java_sp_offset()));
|
||||
|
||||
// last_java_fp is optional
|
||||
if (last_java_fp->is_valid()) {
|
||||
sd(last_java_fp, Address(xthread, JavaThread::last_Java_fp_offset()));
|
||||
}
|
||||
|
||||
// We must set sp last.
|
||||
sd(last_java_sp, Address(xthread, JavaThread::last_Java_sp_offset()));
|
||||
|
||||
}
|
||||
|
||||
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@ -849,9 +849,6 @@ public:
|
||||
void push_cont_fastpath(Register java_thread = xthread);
|
||||
void pop_cont_fastpath(Register java_thread = xthread);
|
||||
|
||||
void inc_held_monitor_count(Register tmp);
|
||||
void dec_held_monitor_count(Register tmp);
|
||||
|
||||
// if heap base register is used - reinit it with the correct value
|
||||
void reinit_heapbase();
|
||||
|
||||
|
||||
@ -2152,10 +2152,6 @@ RegMask Matcher::modL_proj_mask() {
|
||||
return RegMask();
|
||||
}
|
||||
|
||||
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
|
||||
return FP_REG_mask();
|
||||
}
|
||||
|
||||
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
|
||||
assert_cond(addp != nullptr);
|
||||
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
|
||||
@ -10995,6 +10991,7 @@ instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI ba
|
||||
iRegLNoSp tmp3, iRegLNoSp tmp4,
|
||||
iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
|
||||
%{
|
||||
predicate(!UseRVV);
|
||||
match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
|
||||
USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);
|
||||
|
||||
@ -4080,6 +4080,28 @@ instruct varray_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
// fast ArraysSupport.vectorizedHashCode
|
||||
instruct varrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
|
||||
vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5,
|
||||
vReg_V6 v6, vReg_V7 v7, vReg_V8 v8, vReg_V9 v9,
|
||||
iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3,
|
||||
rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseRVV);
|
||||
match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
|
||||
effect(USE_KILL ary, USE_KILL cnt, USE basic_type,
|
||||
TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v6, TEMP v7, TEMP v8, TEMP v9,
|
||||
TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
|
||||
format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result // KILL all" %}
|
||||
ins_encode %{
|
||||
__ arrays_hashcode_v($ary$$Register, $cnt$$Register, $result$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
|
||||
(BasicType)$basic_type$$constant);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
instruct vstring_compareU_128b(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
|
||||
iRegI_R10 result, vReg_V4 v4, vReg_V5 v5, vReg_V6 v6, vReg_V7 v7,
|
||||
vReg_V8 v8, vReg_V9 v9, vReg_V10 v10, vReg_V11 v11,
|
||||
|
||||
@ -885,11 +885,8 @@ static void fill_continuation_entry(MacroAssembler* masm) {
|
||||
|
||||
__ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
|
||||
__ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
|
||||
__ ld(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
|
||||
__ sd(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
|
||||
|
||||
__ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
|
||||
__ sd(zr, Address(xthread, JavaThread::held_monitor_count_offset()));
|
||||
}
|
||||
|
||||
// on entry, sp points to the ContinuationEntry
|
||||
@ -905,50 +902,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
|
||||
__ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
|
||||
__ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
|
||||
|
||||
if (CheckJNICalls) {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
|
||||
__ beqz(t0, L_skip_vthread_code);
|
||||
|
||||
// If the held monitor count is > 0 and this vthread is terminating then
|
||||
// it failed to release a JNI monitor. So we issue the same log message
|
||||
// that JavaThread::exit does.
|
||||
__ ld(t0, Address(xthread, JavaThread::jni_monitor_count_offset()));
|
||||
__ beqz(t0, L_skip_vthread_code);
|
||||
|
||||
// Save return value potentially containing the exception oop in callee-saved x9
|
||||
__ mv(x9, x10);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
|
||||
// Restore potential return value
|
||||
__ mv(x10, x9);
|
||||
|
||||
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
|
||||
// on termination. The held count is implicitly zeroed below when we restore from
|
||||
// the parent held count (which has to be zero).
|
||||
__ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
else {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
|
||||
__ beqz(t0, L_skip_vthread_code);
|
||||
|
||||
// See comment just above. If not checking JNI calls the JNI count is only
|
||||
// needed for assertion checking.
|
||||
__ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));
|
||||
|
||||
__ bind(L_skip_vthread_code);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ ld(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
|
||||
__ sd(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
|
||||
|
||||
__ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
|
||||
__ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
|
||||
__ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
|
||||
|
||||
@ -73,6 +73,9 @@
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_ul, \
|
||||
string_indexof_linear_ul, string_indexof_linear_ul) \
|
||||
do_stub(compiler, arrays_hashcode_powers_of_31) \
|
||||
do_arch_entry(riscv, compiler, arrays_hashcode_powers_of_31, \
|
||||
arrays_hashcode_powers_of_31, arrays_hashcode_powers_of_31) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
|
||||
@ -6624,6 +6624,24 @@ static const int64_t right_3_bits = right_n_bits(3);
|
||||
return start;
|
||||
}
|
||||
|
||||
address generate_arrays_hashcode_powers_of_31() {
assert(UseRVV, "sanity");
const int lmul = 2;
const int stride = MaxVectorSize / sizeof(jint) * lmul;
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "arrays_hashcode_powers_of_31");
address start = __ pc();
for (int i = stride; i >= 0; i--) {
jint power_of_31 = 1;
for (int j = i; j > 0; j--) {
power_of_31 = java_multiply(power_of_31, 31);
}
__ emit_int32(power_of_31);
}

return start;
}
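Editor's note, not part of the patch: the stub emits stride+1 jints holding 31^stride down to 31^0. Index 0 (31^stride) is the per-iteration multiplier read by lw(pow31_highest, Address(t1, -1 * sizeof(jint))), while indices 1..stride are the coefficient window loaded by vle32_v(v_coeffs, t1). A host-side sketch of the same table, assuming stride == 8 (MaxVectorSize of 16 bytes with LMUL 2):

// Editor's sketch of the emitted data, computed on the host.
#include <cstdint>
#include <vector>

static std::vector<int32_t> powers_of_31_table(int stride) {
  std::vector<int32_t> table(stride + 1);
  for (int i = stride; i >= 0; i--) {
    uint32_t p = 1;
    for (int j = i; j > 0; j--) {
      p *= 31u;                      // wraps mod 2^32, matching java_multiply()
    }
    table[stride - i] = (int32_t)p;  // same order as the __ emit_int32() calls
  }
  return table;
}

The actual stride depends on the detected vector register length, so the table size varies between machines; only the layout is fixed.
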
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
/**
|
||||
@ -6818,6 +6836,10 @@ static const int64_t right_3_bits = right_n_bits(3);
|
||||
StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
|
||||
}
|
||||
|
||||
if (UseVectorizedHashCodeIntrinsic && UseRVV) {
|
||||
StubRoutines::riscv::_arrays_hashcode_powers_of_31 = generate_arrays_hashcode_powers_of_31();
|
||||
}
|
||||
|
||||
if (UseSHA256Intrinsics) {
|
||||
Sha2Generator sha2(_masm, this);
|
||||
StubRoutines::_sha256_implCompress = sha2.generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id);
|
||||
|
||||
@ -35,20 +35,20 @@
|
||||
|
||||
uint32_t VM_Version::_initial_vector_length = 0;
|
||||
|
||||
#define DEF_RV_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
VM_Version::NAME##RVExtFeatureValue VM_Version::NAME;
|
||||
#define DEF_RV_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
VM_Version::ext_##PRETTY##RVExtFeatureValue VM_Version::ext_##PRETTY;
|
||||
RV_EXT_FEATURE_FLAGS(DEF_RV_EXT_FEATURE)
|
||||
#undef DEF_RV_EXT_FEATURE
|
||||
|
||||
#define DEF_RV_NON_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
VM_Version::NAME##RVNonExtFeatureValue VM_Version::NAME;
|
||||
#define DEF_RV_NON_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
VM_Version::PRETTY##RVNonExtFeatureValue VM_Version::PRETTY;
|
||||
RV_NON_EXT_FEATURE_FLAGS(DEF_RV_NON_EXT_FEATURE)
|
||||
#undef DEF_RV_NON_EXT_FEATURE
|
||||
|
||||
#define ADD_RV_EXT_FEATURE_IN_LIST(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
&VM_Version::NAME,
|
||||
#define ADD_RV_NON_EXT_FEATURE_IN_LIST(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
&VM_Version::NAME,
|
||||
#define ADD_RV_EXT_FEATURE_IN_LIST(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
&VM_Version::ext_##PRETTY,
|
||||
#define ADD_RV_NON_EXT_FEATURE_IN_LIST(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
&VM_Version::PRETTY,
|
||||
VM_Version::RVFeatureValue* VM_Version::_feature_list[] = {
|
||||
RV_EXT_FEATURE_FLAGS(ADD_RV_EXT_FEATURE_IN_LIST)
|
||||
RV_NON_EXT_FEATURE_FLAGS(ADD_RV_NON_EXT_FEATURE_IN_LIST)
|
||||
@ -148,7 +148,7 @@ void VM_Version::common_initialize() {
|
||||
FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
|
||||
}
|
||||
|
||||
if (UseRVC && !ext_C.enabled()) {
|
||||
if (UseRVC && !ext_c.enabled()) {
|
||||
warning("RVC is not supported on this CPU");
|
||||
FLAG_SET_DEFAULT(UseRVC, false);
|
||||
|
||||
|
||||
@ -185,52 +185,6 @@ class VM_Version : public Abstract_VM_Version {
|
||||
}
|
||||
};
|
||||
|
||||
// Frozen standard extensions
|
||||
// I RV64I
|
||||
// M Integer Multiplication and Division
|
||||
// A Atomic Instructions
|
||||
// F Single-Precision Floating-Point
|
||||
// D Single-Precision Floating-Point
|
||||
// (G = M + A + F + D)
|
||||
// Q Quad-Precision Floating-Point
|
||||
// C Compressed Instructions
|
||||
// H Hypervisor
|
||||
//
|
||||
// Others, open and non-standard
|
||||
// V Vector
|
||||
//
|
||||
// Cache Management Operations
|
||||
// Zicbom Cache Block Management Operations
|
||||
// Zicboz Cache Block Zero Operations
|
||||
// Zicbop Cache Block Prefetch Operations
|
||||
//
|
||||
// Bit-manipulation
|
||||
// Zba Address generation instructions
|
||||
// Zbb Basic bit-manipulation
|
||||
// Zbc Carry-less multiplication
|
||||
// Zbs Single-bit instructions
|
||||
//
|
||||
// Zfh Half-Precision Floating-Point instructions
|
||||
// Zfhmin Minimal Half-Precision Floating-Point instructions
|
||||
//
|
||||
// Zicond Conditional operations
|
||||
//
|
||||
// Zicsr Control and Status Register (CSR) Instructions
|
||||
// Zifencei Instruction-Fetch Fence
|
||||
// Zic64b Cache blocks must be 64 bytes in size, naturally aligned in the address space.
|
||||
// Zihintpause Pause instruction HINT
|
||||
//
|
||||
// Zc Code Size Reduction - Additional compressed instructions.
|
||||
// Zcb Simple code-size saving instructions
|
||||
//
|
||||
// Other features and settings
|
||||
// mvendorid Manufactory JEDEC id encoded, ISA vol 2 3.1.2..
|
||||
// marchid Id for microarch. Mvendorid plus marchid uniquely identify the microarch.
|
||||
// mimpid A unique encoding of the version of the processor implementation.
|
||||
// unaligned_scalar Performance of misaligned scalar accesses (unknown, emulated, slow, fast, unsupported)
|
||||
// unaligned_vector Performance of misaligned vector accesses (unknown, unspported, slow, fast)
|
||||
// satp mode SATP bits (number of virtual addr bits) mbare, sv39, sv48, sv57, sv64
|
||||
|
||||
public:
|
||||
|
||||
#define RV_NO_FLAG_BIT (BitsPerWord+1) // nth_bit will return 0 on values larger than BitsPerWord
|
||||
@ -239,48 +193,84 @@ class VM_Version : public Abstract_VM_Version {
|
||||
//
|
||||
// Fields description in `decl`:
|
||||
// declaration name, extension name, bit value from linux, feature string?, mapped flag)
|
||||
#define RV_EXT_FEATURE_FLAGS(decl) \
|
||||
decl(ext_I , i , ('I' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_M , m , ('M' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_A , a , ('A' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_F , f , ('F' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_D , d , ('D' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_C , c , ('C' - 'A'), true , UPDATE_DEFAULT(UseRVC)) \
|
||||
decl(ext_Q , q , ('Q' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_H , h , ('H' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_V , v , ('V' - 'A'), true , UPDATE_DEFAULT(UseRVV)) \
|
||||
decl(ext_Zicbom , Zicbom , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbom)) \
|
||||
decl(ext_Zicboz , Zicboz , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicboz)) \
|
||||
decl(ext_Zicbop , Zicbop , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbop)) \
|
||||
decl(ext_Zba , Zba , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZba)) \
|
||||
decl(ext_Zbb , Zbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbb)) \
|
||||
decl(ext_Zbc , Zbc , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_Zbs , Zbs , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
|
||||
decl(ext_Zbkb , Zbkb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbkb)) \
|
||||
decl(ext_Zcb , Zcb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
|
||||
decl(ext_Zfa , Zfa , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
|
||||
decl(ext_Zfh , Zfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
|
||||
decl(ext_Zfhmin , Zfhmin , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
|
||||
decl(ext_Zicsr , Zicsr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_Zicntr , Zicntr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_Zifencei , Zifencei , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_Zic64b , Zic64b , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZic64b)) \
|
||||
decl(ext_Ztso , Ztso , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZtso)) \
|
||||
decl(ext_Zihintpause , Zihintpause , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZihintpause)) \
|
||||
decl(ext_Zacas , Zacas , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZacas)) \
|
||||
decl(ext_Zvbb , Zvbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbb, &ext_V, nullptr)) \
|
||||
decl(ext_Zvbc , Zvbc , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbc, &ext_V, nullptr)) \
|
||||
decl(ext_Zvfh , Zvfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvfh, &ext_V, &ext_Zfh, nullptr)) \
|
||||
decl(ext_Zvkn , Zvkn , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkn, &ext_V, nullptr)) \
|
||||
decl(ext_Zicond , Zicond , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicond)) \
|
||||
#define RV_EXT_FEATURE_FLAGS(decl) \
|
||||
/* A Atomic Instructions */ \
|
||||
decl(a , ('A' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* C Compressed Instructions */ \
|
||||
decl(c , ('C' - 'A'), true , UPDATE_DEFAULT(UseRVC)) \
|
||||
/* D Single-Precision Floating-Point */ \
|
||||
decl(d , ('D' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* F Single-Precision Floating-Point */ \
|
||||
decl(f , ('F' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* H Hypervisor */ \
|
||||
decl(h , ('H' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* I RV64I */ \
|
||||
decl(i , ('I' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* M Integer Multiplication and Division */ \
|
||||
decl(m , ('M' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* Q Quad-Precision Floating-Point */ \
|
||||
decl(q , ('Q' - 'A'), true , NO_UPDATE_DEFAULT) \
|
||||
/* V Vector */ \
|
||||
decl(v , ('V' - 'A'), true , UPDATE_DEFAULT(UseRVV)) \
|
||||
\
|
||||
/* ----------------------- Other extensions ----------------------- */ \
|
||||
\
|
||||
/* Atomic compare-and-swap (CAS) instructions */ \
|
||||
decl(Zacas , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZacas)) \
|
||||
/* Zba Address generation instructions */ \
|
||||
decl(Zba , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZba)) \
|
||||
/* Zbb Basic bit-manipulation */ \
|
||||
decl(Zbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbb)) \
|
||||
/* Zbc Carry-less multiplication */ \
|
||||
decl(Zbc , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
/* Bitmanip instructions for Cryptography */ \
|
||||
decl(Zbkb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbkb)) \
|
||||
/* Zbs Single-bit instructions */ \
|
||||
decl(Zbs , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
|
||||
/* Zcb Simple code-size saving instructions */ \
|
||||
decl(Zcb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
|
||||
/* Additional Floating-Point instructions */ \
|
||||
decl(Zfa , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
|
||||
/* Zfh Half-Precision Floating-Point instructions */ \
|
||||
decl(Zfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
|
||||
/* Zfhmin Minimal Half-Precision Floating-Point instructions */ \
|
||||
decl(Zfhmin , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
|
||||
/* Zicbom Cache Block Management Operations */ \
|
||||
decl(Zicbom , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbom)) \
|
||||
/* Zicbop Cache Block Prefetch Operations */ \
|
||||
decl(Zicbop , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbop)) \
|
||||
/* Zicboz Cache Block Zero Operations */ \
|
||||
decl(Zicboz , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicboz)) \
|
||||
/* Base Counters and Timers */ \
|
||||
decl(Zicntr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
/* Zicond Conditional operations */ \
|
||||
decl(Zicond , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicond)) \
|
||||
/* Zicsr Control and Status Register (CSR) Instructions */ \
|
||||
decl(Zicsr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
/* Zic64b Cache blocks must be 64 bytes in size, naturally aligned in the address space. */ \
|
||||
decl(Zic64b , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZic64b)) \
|
||||
/* Zifencei Instruction-Fetch Fence */ \
|
||||
decl(Zifencei , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
/* Zihintpause Pause instruction HINT */ \
|
||||
decl(Zihintpause , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZihintpause)) \
|
||||
/* Total Store Ordering */ \
|
||||
decl(Ztso , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZtso)) \
|
||||
/* Vector Basic Bit-manipulation */ \
|
||||
decl(Zvbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbb, &ext_v, nullptr)) \
|
||||
/* Vector Carryless Multiplication */ \
|
||||
decl(Zvbc , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbc, &ext_v, nullptr)) \
|
||||
/* Vector Extension for Half-Precision Floating-Point */ \
|
||||
decl(Zvfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvfh, &ext_v, &ext_Zfh, nullptr)) \
|
||||
/* Shorthand for Zvkned + Zvknhb + Zvkb + Zvkt */ \
|
||||
decl(Zvkn , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkn, &ext_v, nullptr)) \
|
||||
|
||||
#define DECLARE_RV_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
struct NAME##RVExtFeatureValue : public RVExtFeatureValue { \
NAME##RVExtFeatureValue() : \
RVExtFeatureValue(#PRETTY, LINUX_BIT, RVExtFeatures::CPU_##NAME, FSTRING) {} \
FLAGF; \
}; \
static NAME##RVExtFeatureValue NAME; \
#define DECLARE_RV_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
struct ext_##PRETTY##RVExtFeatureValue : public RVExtFeatureValue { \
ext_##PRETTY##RVExtFeatureValue() : \
RVExtFeatureValue(#PRETTY, LINUX_BIT, RVExtFeatures::CPU_##ext_##PRETTY, FSTRING) {} \
FLAGF; \
}; \
static ext_##PRETTY##RVExtFeatureValue ext_##PRETTY; \

RV_EXT_FEATURE_FLAGS(DECLARE_RV_EXT_FEATURE)
#undef DECLARE_RV_EXT_FEATURE
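Editor's illustration, not part of the patch: after the rework, the declaration name (ext_<name>) and the enum tag (CPU_ext_<name>) are both derived from the single PRETTY argument instead of being spelled twice. For one table entry, decl(Zba, RV_NO_FLAG_BIT, true, UPDATE_DEFAULT(UseZba)), the new macro expands to roughly the following; UPDATE_DEFAULT and RVExtFeatureValue come from surrounding code outside this hunk, so this snippet only compiles in that context:

// Approximate preprocessor output for the Zba entry under the new macro.
struct ext_ZbaRVExtFeatureValue : public RVExtFeatureValue {
  ext_ZbaRVExtFeatureValue() :
    RVExtFeatureValue("Zba", RV_NO_FLAG_BIT, RVExtFeatures::CPU_ext_Zba, true) {}
  UPDATE_DEFAULT(UseZba);   // FLAGF argument, left unexpanded here
};
static ext_ZbaRVExtFeatureValue ext_Zba;
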
|
||||
@ -288,21 +278,27 @@ class VM_Version : public Abstract_VM_Version {
|
||||
// Non-extension features
|
||||
//
|
||||
#define RV_NON_EXT_FEATURE_FLAGS(decl) \
|
||||
decl(mvendorid , VendorId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(marchid , ArchId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(mimpid , ImpId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(satp_mode , SATP , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(unaligned_scalar , UnalignedScalar , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(unaligned_vector , UnalignedVector , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(zicboz_block_size, ZicbozBlockSize , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* Id for microarch. Mvendorid plus marchid uniquely identify the microarch. */ \
|
||||
decl(marchid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* A unique encoding of the version of the processor implementation. */ \
|
||||
decl(mimpid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* SATP bits (number of virtual addr bits) mbare, sv39, sv48, sv57, sv64 */ \
|
||||
decl(satp_mode , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* Performance of misaligned scalar accesses (unknown, emulated, slow, fast, unsupported) */ \
|
||||
decl(unaligned_scalar , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* Performance of misaligned vector accesses (unknown, unspported, slow, fast) */ \
|
||||
decl(unaligned_vector , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
/* Manufactory JEDEC id encoded, ISA vol 2 3.1.2.. */ \
|
||||
decl(mvendorid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
decl(zicboz_block_size , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
|
||||
|
||||
#define DECLARE_RV_NON_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
struct NAME##RVNonExtFeatureValue : public RVNonExtFeatureValue { \
|
||||
NAME##RVNonExtFeatureValue() : \
|
||||
#define DECLARE_RV_NON_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
struct PRETTY##RVNonExtFeatureValue : public RVNonExtFeatureValue { \
|
||||
PRETTY##RVNonExtFeatureValue() : \
|
||||
RVNonExtFeatureValue(#PRETTY, LINUX_BIT, FSTRING) {} \
|
||||
FLAGF; \
|
||||
}; \
|
||||
static NAME##RVNonExtFeatureValue NAME; \
|
||||
static PRETTY##RVNonExtFeatureValue PRETTY; \
|
||||
|
||||
RV_NON_EXT_FEATURE_FLAGS(DECLARE_RV_NON_EXT_FEATURE)
|
||||
#undef DECLARE_RV_NON_EXT_FEATURE
|
||||
@ -312,7 +308,7 @@ private:
|
||||
class RVExtFeatures : public CHeapObj<mtCode> {
|
||||
public:
|
||||
enum RVFeatureIndex {
|
||||
#define DECLARE_RV_FEATURE_ENUM(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) CPU_##NAME,
|
||||
#define DECLARE_RV_FEATURE_ENUM(PRETTY, LINUX_BIT, FSTRING, FLAGF) CPU_##ext_##PRETTY,
|
||||
|
||||
RV_EXT_FEATURE_FLAGS(DECLARE_RV_FEATURE_ENUM)
|
||||
MAX_CPU_FEATURE_INDEX
|
||||
|
||||
@ -282,13 +282,6 @@ LIR_Opr FrameMap::stack_pointer() {
|
||||
return Z_SP_opr;
|
||||
}
|
||||
|
||||
// JSR 292
|
||||
// On ZARCH_64, there is no need to save the SP, because neither
|
||||
// method handle intrinsics nor compiled lambda forms modify it.
|
||||
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
|
||||
return LIR_OprFact::illegalOpr;
|
||||
}
|
||||
|
||||
bool FrameMap::validate_frame() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1980,11 +1980,6 @@ RegMask Matcher::modL_proj_mask() {
|
||||
return _Z_RARG3_LONG_REG_mask;
|
||||
}
|
||||
|
||||
// Copied from sparc.
|
||||
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
|
||||
return RegMask();
|
||||
}
|
||||
|
||||
// Should the matcher clone input 'm' of node 'n'?
|
||||
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
|
||||
if (is_encode_and_store_pattern(n, m)) {
|
||||
|
||||
@ -326,13 +326,6 @@ LIR_Opr FrameMap::stack_pointer() {
|
||||
return FrameMap::rsp_opr;
|
||||
}
|
||||
|
||||
// JSR 292
|
||||
// On x86, there is no need to save the SP, because neither
|
||||
// method handle intrinsics, nor compiled lambda forms modify it.
|
||||
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
|
||||
return LIR_OprFact::illegalOpr;
|
||||
}
|
||||
|
||||
bool FrameMap::validate_frame() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -219,8 +219,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
|
||||
nmethod* nm = sender_blob->as_nmethod_or_null();
|
||||
if (nm != nullptr) {
|
||||
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
|
||||
nm->method()->is_method_handle_intrinsic()) {
|
||||
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -443,47 +442,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
|
||||
return thread_addr;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::verify_deopt_original_pc
|
||||
//
|
||||
// Verifies the calculated original PC of a deoptimization PC for the
|
||||
// given unextended SP.
|
||||
#ifdef ASSERT
|
||||
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
|
||||
frame fr;
|
||||
|
||||
// This is ugly but it's better than to change {get,set}_original_pc
|
||||
// to take an SP value as argument. And it's only a debugging
|
||||
// method anyway.
|
||||
fr._unextended_sp = unextended_sp;
|
||||
|
||||
address original_pc = nm->get_original_pc(&fr);
|
||||
assert(nm->insts_contains_inclusive(original_pc),
|
||||
"original PC must be in the main code section of the compiled method (or must be immediately following it) original_pc: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " name: %s", p2i(original_pc), p2i(unextended_sp), nm->name());
|
||||
}
|
||||
#endif
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::adjust_unextended_sp
|
||||
#ifdef ASSERT
|
||||
void frame::adjust_unextended_sp() {
|
||||
// On x86, sites calling method handle intrinsics and lambda forms are treated
|
||||
// as any other call site. Therefore, no special action is needed when we are
|
||||
// returning to any of these call sites.
|
||||
|
||||
if (_cb != nullptr) {
|
||||
nmethod* sender_nm = _cb->as_nmethod_or_null();
|
||||
if (sender_nm != nullptr) {
|
||||
// If the sender PC is a deoptimization point, get the original PC.
|
||||
if (sender_nm->is_deopt_entry(_pc) ||
|
||||
sender_nm->is_deopt_mh_entry(_pc)) {
|
||||
verify_deopt_original_pc(sender_nm, _unextended_sp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::sender_for_interpreter_frame
|
||||
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -138,17 +138,10 @@
|
||||
int _offset_unextended_sp; // for use in stack-chunk frames
|
||||
};
|
||||
|
||||
void adjust_unextended_sp() NOT_DEBUG_RETURN;
|
||||
|
||||
intptr_t* ptr_at_addr(int offset) const {
|
||||
return (intptr_t*) addr_at(offset);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// Used in frame::sender_for_{interpreter,compiled}_frame
|
||||
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Constructors
|
||||
|
||||
|
||||
@ -111,8 +111,6 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
}
|
||||
|
||||
inline void frame::setup(address pc) {
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
_pc = original_pc;
|
||||
@ -209,7 +207,6 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
|
||||
// assert(_pc != nullptr, "no pc?");
|
||||
|
||||
_cb = CodeCache::find_blob(_pc);
|
||||
adjust_unextended_sp();
|
||||
|
||||
address original_pc = get_deopt_original_pc();
|
||||
if (original_pc != nullptr) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,8 +34,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
|
||||
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
|
||||
#define SUPPORT_MONITOR_COUNT
|
||||
|
||||
#define CPU_MULTI_COPY_ATOMIC
|
||||
|
||||
// The expected size in bytes of a cache line.
|
||||
|
||||
@ -2431,14 +2431,6 @@ void MacroAssembler::pop_cont_fastpath() {
|
||||
bind(L_done);
|
||||
}
|
||||
|
||||
void MacroAssembler::inc_held_monitor_count() {
|
||||
incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
|
||||
}
|
||||
|
||||
void MacroAssembler::dec_held_monitor_count() {
|
||||
decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
|
||||
Label no_cont;
|
||||
@ -5847,7 +5839,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
|
||||
orl(value, rtmp);
|
||||
}
|
||||
|
||||
cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
|
||||
cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
|
||||
jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
|
||||
if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
|
||||
Label L_skip_align2;
|
||||
@ -5910,13 +5902,36 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
|
||||
BIND(L_check_fill_64_bytes_avx2);
|
||||
}
|
||||
// Fill 64-byte chunks
|
||||
Label L_fill_64_bytes_loop;
|
||||
vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
|
||||
|
||||
subptr(count, 16 << shift);
|
||||
jcc(Assembler::less, L_check_fill_32_bytes);
|
||||
align(16);
|
||||
|
||||
// align data for 64-byte chunks
|
||||
Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
|
||||
if (EnableX86ECoreOpts) {
|
||||
// align 'big' arrays to cache lines to minimize split_stores
|
||||
cmpptr(count, 96 << shift);
|
||||
jcc(Assembler::below, L_fill_64_bytes_loop);
|
||||
|
||||
// Find the bytes needed for alignment
|
||||
movptr(rtmp, to);
|
||||
andptr(rtmp, 0x1c);
|
||||
jcc(Assembler::zero, L_fill_64_bytes_loop);
|
||||
negptr(rtmp); // number of bytes to fill 32-rtmp. it filled by 2 mov by 32
|
||||
addptr(rtmp, 32);
|
||||
shrptr(rtmp, 2 - shift);// get number of elements from bytes
|
||||
subptr(count, rtmp); // adjust count by number of elements
|
||||
|
||||
align(16);
|
||||
BIND(L_align_64_bytes_loop);
|
||||
movdl(Address(to, 0), xtmp);
|
||||
addptr(to, 4);
|
||||
subptr(rtmp, 1 << shift);
|
||||
jcc(Assembler::greater, L_align_64_bytes_loop);
|
||||
}
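Editor's note, not part of the patch: the EnableX86ECoreOpts pre-loop above works out how many elements must be stored one dword at a time before `to` reaches the next 32-byte boundary, so that the following 64-byte fill loop issues aligned stores. A plain C++ sketch of that arithmetic; the relationship element size == 4 >> shift is inferred from the surrounding code, not stated in the hunk:

// Editor's sketch of the alignment element count (andptr 0x1c, neg, add 32, shr 2-shift).
#include <cstdint>

static intptr_t elements_to_32_byte_boundary(intptr_t to, int shift) {
  intptr_t bytes = to & 0x1c;      // dword-granular offset within the 32-byte line
  if (bytes == 0) {
    return 0;                      // already aligned, pre-loop is skipped
  }
  bytes = 32 - bytes;              // bytes left until the next 32-byte boundary
  return bytes >> (2 - shift);     // convert bytes to elements, as shrptr(rtmp, 2 - shift)
}
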
|
||||
|
||||
align(16);
|
||||
BIND(L_fill_64_bytes_loop);
|
||||
vmovdqu(Address(to, 0), xtmp);
|
||||
vmovdqu(Address(to, 32), xtmp);
|
||||
@ -5924,6 +5939,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
|
||||
subptr(count, 16 << shift);
|
||||
jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
|
||||
|
||||
align(16);
|
||||
BIND(L_check_fill_32_bytes);
|
||||
addptr(count, 8 << shift);
|
||||
jccb(Assembler::less, L_check_fill_8_bytes);
|
||||
@ -5968,6 +5984,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
|
||||
//
|
||||
// length is too short, just fill qwords
|
||||
//
|
||||
align(16);
|
||||
BIND(L_fill_8_bytes_loop);
|
||||
movq(Address(to, 0), xtmp);
|
||||
addptr(to, 8);
|
||||
@ -5976,14 +5993,22 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
|
||||
jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
|
||||
}
|
||||
}
|
||||
// fill trailing 4 bytes
|
||||
BIND(L_fill_4_bytes);
|
||||
testl(count, 1<<shift);
|
||||
|
||||
Label L_fill_4_bytes_loop;
|
||||
testl(count, 1 << shift);
|
||||
jccb(Assembler::zero, L_fill_2_bytes);
|
||||
|
||||
align(16);
|
||||
BIND(L_fill_4_bytes_loop);
|
||||
movl(Address(to, 0), value);
|
||||
addptr(to, 4);
|
||||
|
||||
BIND(L_fill_4_bytes);
|
||||
subptr(count, 1 << shift);
|
||||
jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
|
||||
|
||||
if (t == T_BYTE || t == T_SHORT) {
|
||||
Label L_fill_byte;
|
||||
addptr(to, 4);
|
||||
BIND(L_fill_2_bytes);
|
||||
// fill trailing 2 bytes
|
||||
testl(count, 1<<(shift-1));
|
||||
|
||||
@ -472,9 +472,6 @@ class MacroAssembler: public Assembler {
|
||||
void push_cont_fastpath();
|
||||
void pop_cont_fastpath();
|
||||
|
||||
void inc_held_monitor_count();
|
||||
void dec_held_monitor_count();
|
||||
|
||||
DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
|
||||
|
||||
// Round up to a power of two
|
||||
|
||||
@ -23,67 +23,24 @@
*/

#include "rdtsc_x86.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/macros.hpp"
#include "vm_version_x86.hpp"

static jlong _epoch = 0;
static bool rdtsc_elapsed_counter_enabled = false;
static jlong tsc_frequency = 0;
DEBUG_ONLY(volatile int Rdtsc::_initialized = 0;)
jlong Rdtsc::_epoch = 0;
jlong Rdtsc::_tsc_frequency = 0;

static jlong set_epoch() {
jlong Rdtsc::set_epoch() {
assert(0 == _epoch, "invariant");
_epoch = os::rdtsc();
return _epoch;
}
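Editor's note, not part of the patch: with the epoch and estimated TSC frequency now held as Rdtsc class members rather than file-local statics, a caller that has a raw tick delta can convert it to wall-clock time by scaling with the frequency. The helper below is an illustrative assumption (its name and placement are the editor's, not from this change):

// Editor's sketch of converting a TSC tick delta into nanoseconds.
#include <cstdint>

static const int64_t NANOS_PER_SEC = 1000000000LL;

static int64_t tsc_ticks_to_nanos(int64_t ticks, int64_t tsc_frequency) {
  if (tsc_frequency <= 0) {
    return 0;                      // frequency estimation failed or not initialized
  }
  // Scale in double to avoid 64-bit overflow for large tick counts.
  return (int64_t)((double)ticks * ((double)NANOS_PER_SEC / (double)tsc_frequency));
}
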
|
||||
|
||||
// Base loop to estimate ticks frequency for tsc counter from user mode.
|
||||
// Volatiles and sleep() are used to prevent compiler from applying optimizations.
|
||||
static void do_time_measurements(volatile jlong& time_base,
|
||||
volatile jlong& time_fast,
|
||||
volatile jlong& time_base_elapsed,
|
||||
volatile jlong& time_fast_elapsed) {
|
||||
static const unsigned int FT_SLEEP_MILLISECS = 1;
|
||||
const unsigned int loopcount = 3;
|
||||
|
||||
volatile jlong start = 0;
|
||||
volatile jlong fstart = 0;
|
||||
volatile jlong end = 0;
|
||||
volatile jlong fend = 0;
|
||||
|
||||
// Figure out the difference between rdtsc and os provided timer.
|
||||
// base algorithm adopted from JRockit.
|
||||
for (unsigned int times = 0; times < loopcount; times++) {
|
||||
start = os::elapsed_counter();
|
||||
OrderAccess::fence();
|
||||
fstart = os::rdtsc();
|
||||
|
||||
// use sleep to prevent compiler from optimizing
|
||||
JavaThread::current()->sleep(FT_SLEEP_MILLISECS);
|
||||
|
||||
end = os::elapsed_counter();
|
||||
OrderAccess::fence();
|
||||
fend = os::rdtsc();
|
||||
|
||||
time_base += end - start;
|
||||
time_fast += fend - fstart;
|
||||
|
||||
// basis for calculating the os tick start
|
||||
// to fast time tick start offset
|
||||
time_base_elapsed += end;
|
||||
time_fast_elapsed += (fend - _epoch);
|
||||
}
|
||||
|
||||
time_base /= loopcount;
|
||||
time_fast /= loopcount;
|
||||
time_base_elapsed /= loopcount;
|
||||
time_fast_elapsed /= loopcount;
|
||||
}
|
||||
|
||||
static jlong initialize_frequency() {
|
||||
assert(0 == tsc_frequency, "invariant");
|
||||
jlong Rdtsc::initialize_frequency() {
|
||||
assert(0 == _tsc_frequency, "invariant");
|
||||
assert(0 == _epoch, "invariant");
|
||||
const jlong initial_counter = set_epoch();
|
||||
if (initial_counter == 0) {
|
||||
@ -102,29 +59,6 @@ static jlong initialize_frequency() {
|
||||
// for invariant tsc platforms, take the maximum qualified cpu frequency
|
||||
tsc_freq = (double)VM_Version::maximum_qualified_cpu_frequency();
|
||||
os_to_tsc_conv_factor = tsc_freq / os_freq;
|
||||
} else {
|
||||
// use measurements to estimate
|
||||
// a conversion factor and the tsc frequency
|
||||
|
||||
volatile jlong time_base = 0;
|
||||
volatile jlong time_fast = 0;
|
||||
volatile jlong time_base_elapsed = 0;
|
||||
volatile jlong time_fast_elapsed = 0;
|
||||
|
||||
// do measurements to get base data
|
||||
// on os timer and fast ticks tsc time relation.
|
||||
do_time_measurements(time_base, time_fast, time_base_elapsed, time_fast_elapsed);
|
||||
|
||||
// if invalid measurements, cannot proceed
|
||||
if (time_fast == 0 || time_base == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
os_to_tsc_conv_factor = (double)time_fast / (double)time_base;
|
||||
if (os_to_tsc_conv_factor > 1) {
|
||||
// estimate on tsc counter frequency
|
||||
tsc_freq = os_to_tsc_conv_factor * os_freq;
|
||||
}
|
||||
}
|
||||
|
||||
if ((tsc_freq < 0) || (tsc_freq > 0 && tsc_freq <= os_freq) || (os_to_tsc_conv_factor <= 1)) {
|
||||
@ -136,47 +70,52 @@ static jlong initialize_frequency() {
|
||||
return (jlong)tsc_freq;
|
||||
}
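
Note: the measurement branch above estimates the TSC rate by comparing how far rdtsc() advances against the OS timer over the same interval and scaling by the OS timer frequency. A minimal, self-contained sketch of the same idea (not the HotSpot code; it assumes an x86 compiler that provides __rdtsc() and uses std::chrono::steady_clock as the reference clock):

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <x86intrin.h>   // __rdtsc() on GCC/Clang for x86

int main() {
  using clock = std::chrono::steady_clock;
  auto t0 = clock::now();
  uint64_t c0 = __rdtsc();
  std::this_thread::sleep_for(std::chrono::milliseconds(10)); // let both counters advance
  uint64_t c1 = __rdtsc();
  auto t1 = clock::now();

  double ns = std::chrono::duration<double, std::nano>(t1 - t0).count();
  double ticks_per_ns = double(c1 - c0) / ns;   // ticks per nanosecond, numerically GHz
  std::printf("estimated TSC frequency: %.2f GHz\n", ticks_per_ns);
  return 0;
}

As in the code above, a single short measurement is noisy; averaging a few iterations (loopcount) smooths the estimate.
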
static bool initialize_elapsed_counter() {
tsc_frequency = initialize_frequency();
return tsc_frequency != 0 && _epoch != 0;
bool Rdtsc::initialize_elapsed_counter() {
_tsc_frequency = initialize_frequency();
return _tsc_frequency != 0 && _epoch != 0;
}

static bool ergonomics() {
const bool invtsc_support = Rdtsc::is_supported();
if (FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps) && invtsc_support) {
FLAG_SET_ERGO(UseFastUnorderedTimeStamps, true);
}
if (Rdtsc::is_supported()) {
// Use rdtsc when it is supported by default
FLAG_SET_ERGO_IF_DEFAULT(UseFastUnorderedTimeStamps, true);
} else if (UseFastUnorderedTimeStamps) {
assert(!FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps), "Unexpected default value");

bool ft_enabled = UseFastUnorderedTimeStamps && invtsc_support;

if (!ft_enabled) {
if (UseFastUnorderedTimeStamps && VM_Version::supports_tsc()) {
warning("\nThe hardware does not support invariant tsc (INVTSC) register and/or cannot guarantee tsc synchronization between sockets at startup.\n"\
"Values returned via rdtsc() are not guaranteed to be accurate, esp. when comparing values from cross sockets reads. Enabling UseFastUnorderedTimeStamps on non-invariant tsc hardware should be considered experimental.\n");
ft_enabled = true;
}
}

if (!ft_enabled) {
// Warn if unable to support command-line flag
if (UseFastUnorderedTimeStamps && !VM_Version::supports_tsc()) {
if (VM_Version::supports_tsc()) {
warning("Ignoring UseFastUnorderedTimeStamps, the hardware does not support invariant tsc (INVTSC) register and/or cannot guarantee tsc synchronization between sockets at startup.\n"
"Values returned via rdtsc() are not guaranteed to be accurate, esp. when comparing values from cross sockets reads.");
} else {
warning("Ignoring UseFastUnorderedTimeStamps, hardware does not support normal tsc");
}

// We do not support non invariant rdtsc
FLAG_SET_ERGO(UseFastUnorderedTimeStamps, false);
}

return ft_enabled;
return UseFastUnorderedTimeStamps;
}
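
Note: the ergonomics above reduce to one decision: auto-enable UseFastUnorderedTimeStamps only when invariant TSC is available, and ignore an explicit +UseFastUnorderedTimeStamps otherwise, with a warning tailored to whether a plain TSC exists at all. A compact standalone restatement of that decision, with invented names and none of the HotSpot flag machinery:

#include <cstdio>

// invariant_tsc: INVTSC supported; plain_tsc: a TSC exists at all;
// flag_set/flag_value: whether the user set the flag explicitly, and to what.
static bool use_fast_timestamps(bool invariant_tsc, bool plain_tsc,
                                bool flag_set, bool flag_value) {
  if (invariant_tsc) {
    return flag_set ? flag_value : true;   // default to on when it is safe
  }
  if (flag_set && flag_value) {
    std::fprintf(stderr, plain_tsc
        ? "Ignoring UseFastUnorderedTimeStamps, TSC is not invariant\n"
        : "Ignoring UseFastUnorderedTimeStamps, no usable TSC\n");
  }
  return false;                            // a non-invariant TSC is never used
}

int main() {
  std::printf("%d\n", use_fast_timestamps(true, true, false, false));  // 1: auto-enabled
  std::printf("%d\n", use_fast_timestamps(false, true, true, true));   // 0: request ignored
  return 0;
}
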
bool Rdtsc::initialize() {
precond(AtomicAccess::xchg(&_initialized, 1) == 0);
assert(0 == _tsc_frequency, "invariant");
assert(0 == _epoch, "invariant");

if (!ergonomics()) {
// We decided to ergonomically not support rdtsc.
return false;
}

// Try to initialize the elapsed counter
return initialize_elapsed_counter();
}

bool Rdtsc::is_supported() {
return VM_Version::supports_tscinv_ext();
}

bool Rdtsc::is_elapsed_counter_enabled() {
return rdtsc_elapsed_counter_enabled;
}

jlong Rdtsc::frequency() {
return tsc_frequency;
return _tsc_frequency;
}

jlong Rdtsc::elapsed_counter() {
@ -191,19 +130,7 @@ jlong Rdtsc::raw() {
return os::rdtsc();
}

bool Rdtsc::initialize() {
static bool initialized = false;
if (!initialized) {
assert(!rdtsc_elapsed_counter_enabled, "invariant");
VM_Version::initialize_tsc();
assert(0 == tsc_frequency, "invariant");
assert(0 == _epoch, "invariant");
bool result = initialize_elapsed_counter(); // init hw
if (result) {
result = ergonomics(); // check logical state
}
rdtsc_elapsed_counter_enabled = result;
initialized = true;
}
return rdtsc_elapsed_counter_enabled;
bool Rdtsc::enabled() {
static bool enabled = initialize();
return enabled;
}

@ -38,14 +38,24 @@
// INVTSC is a minimal requirement for auto-enablement.

class Rdtsc : AllStatic {
private:
DEBUG_ONLY(static volatile int _initialized;)
static jlong _epoch;
static jlong _tsc_frequency;

static jlong set_epoch();

static jlong initialize_frequency();
static bool initialize_elapsed_counter();
static bool initialize();

public:
static jlong elapsed_counter(); // provides quick time stamps
static jlong frequency(); // tsc register
static bool is_supported(); // InvariantTSC
static jlong raw(); // direct rdtsc() access
static bool is_elapsed_counter_enabled(); // turn off with -XX:-UseFastUnorderedTimeStamps
static jlong epoch();
static bool initialize();
static bool enabled();
};

#endif // CPU_X86_RDTSC_X86_HPP
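
Note: a typical consumer of this interface reads a tick delta from elapsed_counter() and scales it by frequency() to obtain wall-clock time. A hedged sketch of that conversion (a hypothetical helper, not part of the JDK), splitting the division so large deltas do not overflow 64 bits:

#include <cstdint>
#include <cstdio>

static int64_t ticks_to_nanos(int64_t ticks, int64_t ticks_per_second) {
  const int64_t seconds = ticks / ticks_per_second;
  const int64_t rest    = ticks % ticks_per_second;
  return seconds * 1000000000LL + rest * 1000000000LL / ticks_per_second;
}

int main() {
  // 3,000,000 ticks at a 3 GHz tick rate is one millisecond.
  std::printf("%lld ns\n", (long long)ticks_to_nanos(3000000, 3000000000LL));
  return 0;
}
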
@ -1352,11 +1352,8 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,

__ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
__ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
__ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
__ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);

__ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
__ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
}

//---------------------------- continuation_enter_cleanup ---------------------------
@ -1380,49 +1377,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
#endif
__ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
__ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);

if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);

// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ cmpptr(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);

// rax may hold an exception oop, save it before the call
__ push(rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
__ pop(rax);

// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);

__ bind(L_skip_vthread_code);
}
#ifdef ASSERT
else {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);

// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);

__ bind(L_skip_vthread_code);
}
#endif

__ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
__ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);

__ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
__ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
__ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));

@ -62,7 +62,7 @@ address VM_Version::_cpuinfo_segv_addr_apx = nullptr;
address VM_Version::_cpuinfo_cont_addr_apx = nullptr;

static BufferBlob* stub_blob;
static const int stub_size = 2000;
static const int stub_size = 2550;

int VM_Version::VM_Features::_features_bitmap_size = sizeof(VM_Version::VM_Features::_features_bitmap) / BytesPerLong;

@ -73,10 +73,12 @@ extern "C" {
typedef void (*get_cpu_info_stub_t)(void*);
typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
typedef void (*clear_apx_test_state_t)(void);
typedef void (*getCPUIDBrandString_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = nullptr;
static detect_virt_stub_t detect_virt_stub = nullptr;
static clear_apx_test_state_t clear_apx_test_state_stub = nullptr;
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;

bool VM_Version::supports_clflush() {
// clflush should always be available on x86_64
@ -2131,6 +2133,8 @@ void VM_Version::initialize() {
g.generate_detect_virt());
clear_apx_test_state_stub = CAST_TO_FN_PTR(clear_apx_test_state_t,
g.clear_apx_test_state());
getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t,
g.generate_getCPUIDBrandString());
get_processor_features();

Assembler::precompute_instructions();
@ -2187,15 +2191,6 @@ typedef enum {
TM_FLAG = 0x20000000
} FeatureEdxFlag;

static BufferBlob* cpuid_brand_string_stub_blob;
static const int cpuid_brand_string_stub_size = 550;

extern "C" {
typedef void (*getCPUIDBrandString_stub_t)(void*);
}

static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;

// VM_Version statics
enum {
ExtendedFamilyIdLength_INTEL = 16,
@ -2488,19 +2483,6 @@ const char* const _feature_extended_ecx_id[] = {
""
};

void VM_Version::initialize_tsc(void) {
ResourceMark rm;

cpuid_brand_string_stub_blob = BufferBlob::create("getCPUIDBrandString_stub", cpuid_brand_string_stub_size);
if (cpuid_brand_string_stub_blob == nullptr) {
vm_exit_during_initialization("Unable to allocate getCPUIDBrandString_stub");
}
CodeBuffer c(cpuid_brand_string_stub_blob);
VM_Version_StubGenerator g(&c);
getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t,
g.generate_getCPUIDBrandString());
}

const char* VM_Version::cpu_model_description(void) {
uint32_t cpu_family = extended_cpu_family();
uint32_t cpu_model = extended_cpu_model();
@ -2589,7 +2571,12 @@ void VM_Version::resolve_cpu_information_details(void) {
_no_of_threads = os::processor_count();

// find out number of threads per cpu package
int threads_per_package = threads_per_core() * cores_per_cpu();
int threads_per_package = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus;
if (threads_per_package == 0) {
// Fallback code to avoid div by zero in subsequent code.
// CPUID 0Bh (ECX = 1) might return 0 on older AMD processor (EPYC 7763 at least)
threads_per_package = threads_per_core() * cores_per_cpu();
}

// use amount of threads visible to the process in order to guess number of sockets
_no_of_sockets = _no_of_threads / threads_per_package;
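
Note: the hunk above estimates the socket count by dividing the threads visible to the process by the threads per package reported by CPUID leaf 0Bh, falling back to threads_per_core() * cores_per_cpu() when that leaf reports 0. A worked example with illustrative numbers only:

#include <cstdio>

int main() {
  int visible_threads     = 128;   // what os::processor_count() might report
  int threads_per_package = 0;     // CPUID 0Bh (ECX = 1) returned 0, as on some older AMD parts
  if (threads_per_package == 0) {
    threads_per_package = 2 /* threads per core */ * 32 /* cores per package */;
  }
  std::printf("estimated sockets: %d\n", visible_threads / threads_per_package);  // prints 2
  return 0;
}
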
@ -2743,6 +2730,10 @@ size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
WRITE_TO_BUF("Invariant TSC");
}

if (supports_hybrid()) {
WRITE_TO_BUF("Hybrid Architecture");
}

return written;
}

@ -1093,7 +1093,6 @@ public:

static bool supports_tscinv_ext(void);

static void initialize_tsc();
static void initialize_cpu_information(void);
};

@ -1697,11 +1697,6 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask();
}

// Register for saving SP into on method handle invokes. Not used on x86_64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return NO_REG_mask();
}

%}

//----------ENCODING BLOCK-----------------------------------------------------

@ -67,6 +67,14 @@ ATTRIBUTE_NO_ASAN static bool _SafeFetchXX_internal(const T *adr, T* result) {

T n = 0;

#ifdef AIX
// AIX allows reading from nullptr without signalling
if (adr == nullptr) {
*result = 0;
return false;
}
#endif

// Set up a jump buffer. Anchor its pointer in TLS. Then read from the unsafe address.
// If that address was invalid, we fault, and in the signal handler we will jump back
// to the jump point.

@ -621,9 +621,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
address deopt = nm->deopt_handler_begin();
assert(deopt != nullptr, "");

frame fr = os::fetch_frame_from_context(uc);

@ -2630,14 +2630,13 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
DWORD exception_code = exception_record->ExceptionCode;
#if defined(_M_ARM64)
address pc = (address) exceptionInfo->ContextRecord->Pc;

if (handle_safefetch(exception_code, pc, (void*)exceptionInfo->ContextRecord)) {
return EXCEPTION_CONTINUE_EXECUTION;
}
#elif defined(_M_AMD64)
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
#error unknown architecture
#endif
Thread* t = Thread::current_or_null_safe();

#if defined(_M_AMD64)
if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
VM_Version::is_cpuinfo_segv_addr(pc)) {
// Verify that OS save/restore AVX registers.
@ -2650,6 +2649,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
VM_Version::clear_apx_test_state();
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr_apx());
}
#else
#error unknown architecture
#endif

#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
@ -2660,6 +2661,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
#endif

Thread* t = Thread::current_or_null_safe();
if (t != nullptr && t->is_Java_thread()) {
JavaThread* thread = JavaThread::cast(t);
bool in_java = thread->thread_state() == _thread_in_Java;
@ -2690,10 +2692,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Fatal red zone violation.
overflow_state->disable_stack_red_zone();
tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
#endif
return EXCEPTION_CONTINUE_SEARCH;
}
} else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
@ -2745,10 +2745,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}

// Stack overflow or null pointer exception in native code.
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
#endif
return EXCEPTION_CONTINUE_SEARCH;
} // /EXCEPTION_ACCESS_VIOLATION
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -2797,9 +2795,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
address deopt = nm->deopt_handler_begin();
assert(nm->insts_contains_inclusive(pc), "");
nm->set_original_pc(&fr, pc);
// Set pc to handler
@ -2810,41 +2806,21 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
}

#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
if (exception_code != EXCEPTION_BREAKPOINT) {
bool should_report_error = (exception_code != EXCEPTION_BREAKPOINT);

#if defined(_M_ARM64)
should_report_error = should_report_error &&
FAILED(exception_code) &&
(exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION);
#endif

if (should_report_error) {
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
}
#endif
return EXCEPTION_CONTINUE_SEARCH;
}

#if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
#if defined(_M_ARM64)
address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
#error unknown architecture
#endif

// Fast path for code part of the code cache
if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
return topLevelExceptionFilter(exceptionInfo);
}

// If the exception occurred in the codeCache, pass control
// to our normal exception handler.
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != nullptr) {
return topLevelExceptionFilter(exceptionInfo);
}

return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
@ -4521,7 +4497,7 @@ jint os::init_2(void) {
// Setup Windows Exceptions

#if defined(USE_VECTORED_EXCEPTION_HANDLING)
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelExceptionFilter);
previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
#endif

@ -150,6 +150,8 @@ public:
// signal support
static void* install_signal_handler(int sig, signal_handler_t handler);
static void* user_handler();

static void context_set_pc(CONTEXT* uc, address pc);
};

#endif // OS_WINDOWS_OS_WINDOWS_HPP

64
src/hotspot/os/windows/safefetch_static_windows.cpp
Normal file
64
src/hotspot/os/windows/safefetch_static_windows.cpp
Normal file
@ -0,0 +1,64 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "os_windows.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"
#include "utilities/globalDefinitions.hpp"

#ifdef SAFEFETCH_METHOD_STATIC_ASSEMBLY

// SafeFetch handling, static assembly style:
//
// SafeFetch32 and SafeFetchN are implemented via static assembly
// and live in os_cpu/xx_xx/safefetch_xx_xx.S

extern "C" char _SafeFetch32_continuation[];
extern "C" char _SafeFetch32_fault[];

#ifdef _LP64
extern "C" char _SafeFetchN_continuation[];
extern "C" char _SafeFetchN_fault[];
#endif // _LP64

bool handle_safefetch(int exception_code, address pc, void* context) {
CONTEXT* ctx = (CONTEXT*)context;
if (exception_code == EXCEPTION_ACCESS_VIOLATION && ctx != nullptr) {
if (pc == (address)_SafeFetch32_fault) {
os::win32::context_set_pc(ctx, (address)_SafeFetch32_continuation);
return true;
}
#ifdef _LP64
if (pc == (address)_SafeFetchN_fault) {
os::win32::context_set_pc(ctx, (address)_SafeFetchN_continuation);
return true;
}
#endif
}
return false;
}

#endif // SAFEFETCH_METHOD_STATIC_ASSEMBLY
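
Note: the fault/continuation pair above is what lets SafeFetch32 and SafeFetchN return a caller-chosen default instead of crashing when the probed address is unmapped. A sketch of typical usage, assuming the HotSpot declaration int SafeFetch32(int* adr, int errValue) from runtime/safefetch.hpp; the double probe with two different sentinels is only needed because the memory might legitimately contain the first sentinel:

#include "runtime/safefetch.hpp"

static bool looks_readable(int* adr) {
  const int sentinel = 0xBADBAD;
  if (SafeFetch32(adr, sentinel) != sentinel) {
    return true;                      // the read succeeded
  }
  // Re-probe with a different sentinel before concluding the address is bad.
  return SafeFetch32(adr, ~sentinel) != ~sentinel;
}
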
@ -177,16 +177,16 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::mimpid.enable_feature(query[RISCV_HWPROBE_KEY_MIMPID].value);
}
if (is_set(RISCV_HWPROBE_KEY_BASE_BEHAVIOR, RISCV_HWPROBE_BASE_BEHAVIOR_IMA)) {
VM_Version::ext_I.enable_feature();
VM_Version::ext_M.enable_feature();
VM_Version::ext_A.enable_feature();
VM_Version::ext_i.enable_feature();
VM_Version::ext_m.enable_feature();
VM_Version::ext_a.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_FD)) {
VM_Version::ext_F.enable_feature();
VM_Version::ext_D.enable_feature();
VM_Version::ext_f.enable_feature();
VM_Version::ext_d.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_C)) {
VM_Version::ext_C.enable_feature();
VM_Version::ext_c.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_V)) {
// Linux signal return bug when using vector with vlen > 128b in pre 6.8.5.
@ -199,7 +199,7 @@ void RiscvHwprobe::add_features_from_query_result() {
log.info("Vector not enabled automatically via hwprobe, but can be turned on with -XX:+UseRVV.");
}
} else {
VM_Version::ext_V.enable_feature();
VM_Version::ext_v.enable_feature();
}
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBA)) {

@ -105,15 +105,15 @@ uint32_t VM_Version::cpu_vector_length() {

void VM_Version::setup_cpu_available_features() {

assert(ext_I.feature_bit() == HWCAP_ISA_I, "Bit for I must follow Linux HWCAP");
assert(ext_M.feature_bit() == HWCAP_ISA_M, "Bit for M must follow Linux HWCAP");
assert(ext_A.feature_bit() == HWCAP_ISA_A, "Bit for A must follow Linux HWCAP");
assert(ext_F.feature_bit() == HWCAP_ISA_F, "Bit for F must follow Linux HWCAP");
assert(ext_D.feature_bit() == HWCAP_ISA_D, "Bit for D must follow Linux HWCAP");
assert(ext_C.feature_bit() == HWCAP_ISA_C, "Bit for C must follow Linux HWCAP");
assert(ext_Q.feature_bit() == HWCAP_ISA_Q, "Bit for Q must follow Linux HWCAP");
assert(ext_H.feature_bit() == HWCAP_ISA_H, "Bit for H must follow Linux HWCAP");
assert(ext_V.feature_bit() == HWCAP_ISA_V, "Bit for V must follow Linux HWCAP");
assert(ext_i.feature_bit() == HWCAP_ISA_I, "Bit for I must follow Linux HWCAP");
assert(ext_m.feature_bit() == HWCAP_ISA_M, "Bit for M must follow Linux HWCAP");
assert(ext_a.feature_bit() == HWCAP_ISA_A, "Bit for A must follow Linux HWCAP");
assert(ext_f.feature_bit() == HWCAP_ISA_F, "Bit for F must follow Linux HWCAP");
assert(ext_d.feature_bit() == HWCAP_ISA_D, "Bit for D must follow Linux HWCAP");
assert(ext_c.feature_bit() == HWCAP_ISA_C, "Bit for C must follow Linux HWCAP");
assert(ext_q.feature_bit() == HWCAP_ISA_Q, "Bit for Q must follow Linux HWCAP");
assert(ext_h.feature_bit() == HWCAP_ISA_H, "Bit for H must follow Linux HWCAP");
assert(ext_v.feature_bit() == HWCAP_ISA_V, "Bit for V must follow Linux HWCAP");

if (!RiscvHwprobe::probe_features()) {
os_aux_features();

@ -115,6 +115,10 @@ intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
return reinterpret_cast<intptr_t*>(uc->REG_BCP);
}

void os::win32::context_set_pc(CONTEXT* uc, address pc) {
uc->Pc = (intptr_t)pc;
}

bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;

@ -0,0 +1,65 @@
;
; Copyright (c) 2022 SAP SE. All rights reserved.
; Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
; DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
;
; This code is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License version 2 only, as
; published by the Free Software Foundation.
;
; This code is distributed in the hope that it will be useful, but WITHOUT
; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; version 2 for more details (a copy is included in the LICENSE file that
; accompanied this code).
;
; You should have received a copy of the GNU General Public License version
; 2 along with this work; if not, write to the Free Software Foundation,
; Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
;
; Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
; or visit www.oracle.com if you need additional information or have any
; questions.
;

; Support for int SafeFetch32(int* address, int defaultval);
;
; x0 : address
; w1 : defaultval

; needed to align function start to 4 byte
ALIGN 4
EXPORT _SafeFetch32_fault
EXPORT _SafeFetch32_continuation
EXPORT SafeFetch32_impl
AREA safefetch_text, CODE

SafeFetch32_impl
_SafeFetch32_fault
ldr w0, [x0]
ret

_SafeFetch32_continuation
mov x0, x1
ret

; Support for intptr_t SafeFetchN(intptr_t* address, intptr_t defaultval);
;
; x0 : address
; x1 : defaultval

ALIGN 4
EXPORT _SafeFetchN_fault
EXPORT _SafeFetchN_continuation
EXPORT SafeFetchN_impl

SafeFetchN_impl
_SafeFetchN_fault
ldr x0, [x0]
ret

_SafeFetchN_continuation
mov x0, x1
ret

END
@ -57,7 +57,6 @@ public:
OSR_Entry,
Exceptions, // Offset where exception handler lives
Deopt, // Offset where deopt handler lives
DeoptMH, // Offset where MethodHandle deopt handler lives
UnwindHandler, // Offset to default unwind handler
max_Entries };

@ -77,7 +76,6 @@ public:
_values[OSR_Entry ] = 0;
_values[Exceptions ] = -1;
_values[Deopt ] = -1;
_values[DeoptMH ] = -1;
_values[UnwindHandler ] = -1;
}

@ -310,14 +310,6 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
CHECK_BAILOUT();

// Emit the MethodHandle deopt handler code (if required).
if (has_method_handle_invokes()) {
// We can use the same code as for the normal deopt handler, we
// just need a different entry point address.
code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
CHECK_BAILOUT();
}

// Emit the handler to remove the activation from the stack and
// dispatch to the caller.
offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
@ -574,7 +566,6 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _has_unsafe_access(false)
, _has_irreducible_loops(false)
, _would_profile(false)
, _has_method_handle_invokes(false)
, _has_reserved_stack_access(method->has_reserved_stack_access())
, _has_monitors(method->is_synchronized() || method->has_monitor_bytecodes())
, _has_scoped_access(method->is_scoped())

@ -79,7 +79,6 @@ class Compilation: public StackObj {
bool _has_unsafe_access;
bool _has_irreducible_loops;
bool _would_profile;
bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
bool _has_reserved_stack_access;
bool _has_monitors; // Fastpath monitors detection for Continuations
bool _has_scoped_access; // For shared scope closure
@ -180,10 +179,6 @@ class Compilation: public StackObj {
// Statistics gathering
void notice_inlined_method(ciMethod* method);

// JSR 292
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }

@ -155,9 +155,6 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer();

// JSR 292
static LIR_Opr method_handle_invoke_SP_save_opr();

static BasicTypeArray* signature_type_array_for(const ciMethod* method);

// for outgoing calls, these also update the reserved area to

@ -190,7 +190,6 @@ CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, boo
, _exception_handlers(exception_handlers)
, _oop_map(nullptr)
, _stack(stack)
, _is_method_handle_invoke(false)
, _deoptimize_on_exception(deoptimize_on_exception)
, _force_reexecute(false) {
assert(_stack != nullptr, "must be non null");
@ -203,7 +202,6 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
, _exception_handlers(nullptr)
, _oop_map(nullptr)
, _stack(stack == nullptr ? info->_stack : stack)
, _is_method_handle_invoke(info->_is_method_handle_invoke)
, _deoptimize_on_exception(info->_deoptimize_on_exception)
, _force_reexecute(info->_force_reexecute) {

@ -218,7 +216,7 @@ void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_
// record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
_scope_debug_info->record_debug_info(recorder, pc_offset, reexecute, _is_method_handle_invoke);
_scope_debug_info->record_debug_info(recorder, pc_offset, reexecute);
recorder->end_safepoint(pc_offset);
}

@ -234,7 +234,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
//Whether we should reexecute this bytecode for deopt
bool should_reexecute();

void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool reexecute, bool is_method_handle_invoke = false) {
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool reexecute) {
if (caller() != nullptr) {
// Order is significant: Must record caller first.
caller()->record_debug_info(recorder, pc_offset, false/*reexecute*/);
@ -248,7 +248,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
bool has_ea_local_in_scope = false;
bool arg_escape = false;
recorder->describe_scope(pc_offset, methodHandle(), scope()->method(), bci(),
reexecute, rethrow_exception, is_method_handle_invoke, return_oop,
reexecute, rethrow_exception, return_oop,
has_ea_local_in_scope, arg_escape, locvals, expvals, monvals);
}
};
@ -262,7 +262,6 @@ class CodeEmitInfo: public CompilationResourceObj {
XHandlers* _exception_handlers;
OopMap* _oop_map;
ValueStack* _stack; // used by deoptimization (contains also monitors
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
bool _deoptimize_on_exception;
bool _force_reexecute; // force the reexecute flag on, used for patching stub

@ -288,9 +287,6 @@ class CodeEmitInfo: public CompilationResourceObj {
void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);

bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }

bool force_reexecute() const { return _force_reexecute; }
void set_force_reexecute() { _force_reexecute = true; }

@ -709,11 +709,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}

if (opJavaCall->_info) do_info(opJavaCall->_info);
if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr &&
opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);

@ -1176,7 +1176,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
private:
ciMethod* _method;
LIR_Opr _receiver;
LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.

public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1186,7 +1185,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
: LIR_OpCall(code, addr, result, arguments, info)
, _method(method)
, _receiver(receiver)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1195,7 +1193,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _method(method)
, _receiver(receiver)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

LIR_Opr receiver() const { return _receiver; }

@ -478,12 +478,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
fatal("unexpected op code: %s", op->name());
break;
}

// JSR 292
// Record if this method has MethodHandle invokes.
if (op->is_method_handle_invoke()) {
compilation()->set_has_method_handle_invokes(true);
}
}

@ -2712,19 +2712,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// emit invoke code
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

// JSR 292
// Preserve the SP over MethodHandle call sites, if needed.
ciMethod* target = x->target();
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form());
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
}
}

switch (x->code()) {
case Bytecodes::_invokestatic:
__ call_static(target, result_register,
@ -2757,13 +2745,6 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}

// JSR 292
// Restore the SP after MethodHandle call sites, if needed.
if (is_method_handle_invoke
&& FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}

if (result_register->is_valid()) {
LIR_Opr result = rlock_result(x);
__ move(result_register, result);

@ -541,9 +541,6 @@ extern void vm_exit(int code);
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
// Reset method handle flag.
current->set_is_method_handle_return(false);

Handle exception(current, ex);

// This function is called when we are about to throw an exception. Therefore,
@ -622,8 +619,6 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
if (guard_pages_enabled) {
address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
if (fast_continuation != nullptr) {
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(nm->is_method_handle_return(pc));
return fast_continuation;
}
}
@ -660,8 +655,6 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
}

current->set_vm_result_oop(exception());
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(nm->is_method_handle_return(pc));

if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;

@ -33,7 +33,10 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/growableArray.hpp"
@ -348,9 +351,18 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
case MetaspaceObj::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
log_debug(aot, map)(_LOG_PREFIX, p2i(requested_addr), type_name, bytes);
break;
@ -389,6 +401,18 @@ void AOTMapLogger::log_const_method(ConstMethod* cm, address requested_addr, con
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, cm->method()->external_name());
}

void AOTMapLogger::log_method_counters(MethodCounters* mc, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, mc->method()->external_name());
}

void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}

void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
@ -407,6 +431,16 @@ void AOTMapLogger::log_symbol(Symbol* s, address requested_addr, const char* typ
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
s->as_quoted_ascii());
}
void AOTMapLogger::log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
if (ktd->has_holder()) {
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
ktd->name()->as_klass_external_name());
} else {
log_debug(aot, map)(_LOG_PREFIX, p2i(requested_addr), type_name, bytes);
}
}

#undef _LOG_PREFIX

@ -35,6 +35,7 @@
class ArchiveHeapInfo;
class DumpRegion;
class FileMapInfo;
class KlassTrainingData;
class outputStream;

// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
@ -98,9 +99,14 @@ class AOTMapLogger : AllStatic {
static void log_constant_pool_cache(ConstantPoolCache* cpc, address requested_addr,
const char* type_name, int bytes, Thread* current);
static void log_const_method(ConstMethod* cm, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_counters(MethodCounters* mc, address requested_addr, const char* type_name, int bytes,
Thread* current);
static void log_method_data(MethodData* md, address requested_addr, const char* type_name, int bytes,
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);

#if INCLUDE_CDS_JAVA_HEAP

@ -4678,11 +4678,15 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
return signature + 1;
case JVM_SIGNATURE_CLASS: {
if (_major_version < JAVA_1_5_VERSION) {
signature++;
length--;
// Skip over the class name if one is there
const char* const p = skip_over_field_name(signature + 1, true, --length);

const char* const p = skip_over_field_name(signature, true, length);
assert(p == nullptr || p > signature, "must parse one character at least");
// The next character better be a semicolon
if (p && (p - signature) > 1 && p[0] == JVM_SIGNATURE_ENDCLASS) {
if (p != nullptr && // Parse of field name succeeded.
p - signature < static_cast<int>(length) && // There is at least one character left to parse.
p[0] == JVM_SIGNATURE_ENDCLASS) {
return p + 1;
}
}
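
Note: the hunk above reworks how a class-type field signature of the form "L<classname>;" is parsed for pre-1.5 class files: the leading 'L' is consumed first, the class name is skipped, and the next character must be the ';' terminator while still lying within the remaining length. A minimal, self-contained illustration of that shape check (not the JDK parser):

#include <cassert>
#include <cstring>

// A class-type field signature is 'L', a non-empty class name, then ';'.
static bool is_class_signature(const char* sig, size_t len) {
  if (len < 3 || sig[0] != 'L' || sig[len - 1] != ';') {
    return false;
  }
  return std::memchr(sig + 1, ';', len - 2) == nullptr;  // no early terminator
}

int main() {
  assert(is_class_signature("Ljava/lang/String;", std::strlen("Ljava/lang/String;")));
  assert(!is_class_signature("L;", 2));   // empty class name is rejected
  assert(!is_class_signature("I", 1));    // a primitive descriptor, not a class type
  return 0;
}
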
@ -174,7 +174,6 @@ InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(

// No longer holding SharedDictionary_lock
// No need to lock, as <ik> can be held only by a single thread.
loader_data->add_class(ik);

// Get the package entry.
PackageEntry* pkg_entry = CDSProtectionDomain::get_package_entry_from_class(ik, class_loader);

@ -119,9 +119,11 @@ void vmClasses::resolve_all(TRAPS) {
// after vmSymbols::initialize() is called but before any classes are pre-loaded.
ClassLoader::classLoader_init2(THREAD);

#if INCLUDE_CDS
if (CDSConfig::is_using_aot_linked_classes()) {
AOTLinkedClassBulkLoader::preload_classes(THREAD);
}
#endif

// Preload commonly used klasses
vmClassID scan = vmClassID::FIRST;

@ -23,23 +23,24 @@
*/

#include "code/codeBehaviours.hpp"
#include "code/nmethod.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = nullptr;

bool DefaultICProtectionBehaviour::lock(nmethod* method) {
if (is_safe(method)) {
bool DefaultICProtectionBehaviour::lock(nmethod* nm) {
if (is_safe(nm)) {
return false;
}
CompiledIC_lock->lock_without_safepoint_check();
return true;
}

void DefaultICProtectionBehaviour::unlock(nmethod* method) {
void DefaultICProtectionBehaviour::unlock(nmethod* nm) {
CompiledIC_lock->unlock();
}

bool DefaultICProtectionBehaviour::is_safe(nmethod* method) {
return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self();
bool DefaultICProtectionBehaviour::is_safe(nmethod* nm) {
return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self() || (NMethodState_lock->owned_by_self() && nm->is_not_installed());
}