Merge branch 'master' into linkclass

This commit is contained in:
Jan Kratochvil 2025-10-09 14:59:32 +02:00
commit b0be560b9c
518 changed files with 10819 additions and 5010 deletions

View File

@ -327,8 +327,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-x64
runs-on: 'macos-13'
xcode-toolset-version: '14.3.1'
runs-on: 'macos-15-intel'
xcode-toolset-version: '16.4'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
@ -340,8 +340,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-aarch64
runs-on: 'macos-14'
xcode-toolset-version: '15.4'
runs-on: 'macos-15'
xcode-toolset-version: '16.4'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
@ -432,9 +432,9 @@ jobs:
with:
platform: macos-aarch64
bootjdk-platform: macos-aarch64
runs-on: macos-14
runs-on: macos-15
dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
xcode-toolset-version: '15.4'
xcode-toolset-version: '16.4'
debug-suffix: -debug
test-windows-x64:

View File

@ -63,7 +63,7 @@ TOOL_GENERATECURRENCYDATA = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_
TOOL_TZDB = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
build.tools.tzdb.TzdbZoneRulesCompiler
TOOL_BLOCKED_CERTS = $(JAVA_SMALL) -Xlog:disable -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
TOOL_BLOCKED_CERTS = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
--add-exports java.base/sun.security.util=ALL-UNNAMED \
build.tools.blockedcertsconverter.BlockedCertsConverter

View File

@ -444,6 +444,9 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Force en-US environment
UTIL_ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-Xlog:all=off:stdout],boot_jdk_jvmargs,[$JAVA])
UTIL_ADD_JVM_ARG_IF_OK([-Xlog:all=warning:stderr],boot_jdk_jvmargs,[$JAVA])
if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
# Use our own CDS archive
UTIL_ADD_JVM_ARG_IF_OK([$boot_jdk_cds_args -Xshare:auto],boot_jdk_jvmargs,[$JAVA])

View File

@ -32,11 +32,6 @@
547d 92ca
53da 9b7e
446e f86f
#
# we should use this one instead of the 4260<-ff0d
#4260 2212
4260 ff0d
#
426A 00A6
43A1 301C
444A 2014

View File

@ -25,13 +25,6 @@
# 4260 <--> 2212
# 426A <--> 00A6
#
# Warning:
# "our old" implementation seems agree with above "new" mappings
# except the entries 4260 <-> 2212. To keep the "compatbility"
# with the "old" implementation, I changed the entries "temporarily"
# 4260 <-> 2212
# 4260 <- ff0d
#
00 0000
01 0001
02 0002
@ -407,8 +400,7 @@ FF 009F
425D FF09
425E FF1B
425F FFE2
#4260 FF0D
4260 2212
4260 FF0D
4261 FF0F
426A FFE4
426B FF0C

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -57,61 +57,61 @@
COMMA := ,
os := $(shell uname -o)
cpu := $(shell uname -p)
OS := $(shell uname -o)
CPU := $(shell uname -m)
# Figure out what platform this is building on.
me := $(cpu)-$(if $(findstring Linux,$(os)),linux-gnu)
ME := $(CPU)-$(if $(findstring Linux,$(OS)),linux-gnu)
$(info Building on platform $(me))
$(info Building on platform $(ME))
#
# By default just build for the current platform, which is assumed to be Linux
#
ifeq ($(TARGETS), )
platforms := $(me)
host_platforms := $(platforms)
PLATFORMS := $(ME)
HOST_PLATFORMS := $(PLATFORMS)
else
platforms := $(subst $(COMMA), , $(TARGETS))
host_platforms := $(me)
PLATFORMS := $(subst $(COMMA), , $(TARGETS))
HOST_PLATFORMS := $(ME)
endif
target_platforms := $(platforms)
$(info host_platforms $(host_platforms))
$(info target_platforms $(target_platforms))
TARGET_PLATFORMS := $(PLATFORMS)
$(info HOST_PLATFORMS $(HOST_PLATFORMS))
$(info TARGET_PLATFORMS $(TARGET_PLATFORMS))
all compile : $(platforms)
all compile : $(PLATFORMS)
ifeq ($(SKIP_ME), )
$(foreach p,$(filter-out $(me),$(platforms)),$(eval $(p) : $$(me)))
$(foreach p,$(filter-out $(ME),$(PLATFORMS)),$(eval $(p) : $$(ME)))
endif
OUTPUT_ROOT = $(abspath ../../build/devkit)
RESULT = $(OUTPUT_ROOT)/result
submakevars = HOST=$@ BUILD=$(me) RESULT=$(RESULT) OUTPUT_ROOT=$(OUTPUT_ROOT)
SUBMAKEVARS = HOST=$@ BUILD=$(ME) RESULT=$(RESULT) OUTPUT_ROOT=$(OUTPUT_ROOT)
$(host_platforms) :
$(HOST_PLATFORMS) :
@echo 'Building compilers for $@'
@echo 'Targets: $(target_platforms)'
for p in $(filter $@, $(target_platforms)) $(filter-out $@, $(target_platforms)); do \
$(MAKE) -f Tools.gmk download-rpms $(submakevars) \
@echo 'Targets: $(TARGET_PLATFORMS)'
for p in $(filter $@, $(TARGET_PLATFORMS)) $(filter-out $@, $(TARGET_PLATFORMS)); do \
$(MAKE) -f Tools.gmk download-rpms $(SUBMAKEVARS) \
TARGET=$$p PREFIX=$(RESULT)/$@-to-$$p && \
$(MAKE) -f Tools.gmk all $(submakevars) \
$(MAKE) -f Tools.gmk all $(SUBMAKEVARS) \
TARGET=$$p PREFIX=$(RESULT)/$@-to-$$p && \
$(MAKE) -f Tools.gmk ccache $(submakevars) \
$(MAKE) -f Tools.gmk ccache $(SUBMAKEVARS) \
TARGET=$@ PREFIX=$(RESULT)/$@-to-$$p || exit 1 ; \
done
@echo 'All done"'
today := $(shell date +%Y%m%d)
TODAY := $(shell date +%Y%m%d)
define Mktar
$(1)-to-$(2)_tar = $$(RESULT)/sdk-$(1)-to-$(2)-$$(today).tar.gz
$(1)-to-$(2)_tar = $$(RESULT)/sdk-$(1)-to-$(2)-$$(TODAY).tar.gz
$$($(1)-to-$(2)_tar) : PLATFORM = $(1)-to-$(2)
TARFILES += $$($(1)-to-$(2)_tar)
endef
$(foreach p,$(host_platforms),$(foreach t,$(target_platforms),$(eval $(call Mktar,$(p),$(t)))))
$(foreach p,$(HOST_PLATFORMS),$(foreach t,$(TARGET_PLATFORMS),$(eval $(call Mktar,$(p),$(t)))))
tars : all $(TARFILES)
onlytars : $(TARFILES)
@ -119,9 +119,9 @@ onlytars : $(TARFILES)
$(MAKE) -r -f Tars.gmk SRC_DIR=$(RESULT)/$(PLATFORM) TAR_FILE=$@
clean :
rm -rf $(addprefix ../../build/devkit/, result $(host_platforms))
rm -rf $(addprefix ../../build/devkit/, result $(HOST_PLATFORMS))
dist-clean: clean
rm -rf $(addprefix ../../build/devkit/, src download)
FORCE :
.PHONY : all compile tars $(configs) $(host_platforms) clean dist-clean
.PHONY : all compile tars $(HOST_PLATFORMS) clean dist-clean

View File

@ -39,7 +39,7 @@
# Fix this...
#
uppercase = $(shell echo $1 | tr a-z A-Z)
lowercase = $(shell echo $1 | tr A-Z a-z)
$(info TARGET=$(TARGET))
$(info HOST=$(HOST))
@ -104,26 +104,26 @@ endif
################################################################################
# Define external dependencies
gcc_ver_only := 14.2.0
binutils_ver_only := 2.43
ccache_ver_only := 4.10.2
GCC_VER_ONLY := 14.2.0
BINUTILS_VER_ONLY := 2.43
CCACHE_VER_ONLY := 4.10.2
CCACHE_CMAKE_BASED := 1
mpfr_ver_only := 4.2.1
gmp_ver_only := 6.3.0
mpc_ver_only := 1.3.1
gdb_ver_only := 15.2
MPFR_VER_ONLY := 4.2.1
GMP_VER_ONLY := 6.3.0
MPC_VER_ONLY := 1.3.1
GDB_VER_ONLY := 15.2
dependencies := gcc binutils ccache mpfr gmp mpc gdb
DEPENDENCIES := GCC BINUTILS CCACHE MPFR GMP MPC GDB
$(foreach dep,$(dependencies),$(eval $(dep)_ver := $(dep)-$($(dep)_ver_only)))
$(foreach dep,$(DEPENDENCIES),$(eval $(dep)_VER := $(call lowercase,$(dep)-$($(dep)_VER_ONLY))))
GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.xz
BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.gz
CCACHE := https://github.com/ccache/ccache/releases/download/v$(ccache_ver_only)/$(ccache_ver).tar.xz
MPFR := https://www.mpfr.org/$(mpfr_ver)/$(mpfr_ver).tar.bz2
GMP := http://ftp.gnu.org/pub/gnu/gmp/$(gmp_ver).tar.bz2
MPC := http://ftp.gnu.org/pub/gnu/mpc/$(mpc_ver).tar.gz
GDB := http://ftp.gnu.org/gnu/gdb/$(gdb_ver).tar.xz
GCC_URL := https://ftp.gnu.org/pub/gnu/gcc/$(GCC_VER)/$(GCC_VER).tar.xz
BINUTILS_URL := https://ftp.gnu.org/pub/gnu/binutils/$(BINUTILS_VER).tar.gz
CCACHE_URL := https://github.com/ccache/ccache/releases/download/v$(CCACHE_VER_ONLY)/$(CCACHE_VER).tar.xz
MPFR_URL := https://www.mpfr.org/$(MPFR_VER)/$(MPFR_VER).tar.bz2
GMP_URL := https://ftp.gnu.org/pub/gnu/gmp/$(GMP_VER).tar.bz2
MPC_URL := https://ftp.gnu.org/pub/gnu/mpc/$(MPC_VER).tar.gz
GDB_URL := https://ftp.gnu.org/gnu/gdb/$(GDB_VER).tar.xz
REQUIRED_MIN_MAKE_MAJOR_VERSION := 4
ifneq ($(REQUIRED_MIN_MAKE_MAJOR_VERSION),)
@ -180,10 +180,10 @@ DOWNLOAD_RPMS := $(DOWNLOAD)/rpms/$(TARGET)-$(LINUX_VERSION)
SRCDIR := $(OUTPUT_ROOT)/src
# Marker file for unpacking rpms
rpms := $(SYSROOT)/rpms_unpacked
RPMS := $(SYSROOT)/rpms_unpacked
# Need to patch libs that are linker scripts to use non-absolute paths
libs := $(SYSROOT)/libs_patched
LIBS := $(SYSROOT)/libs_patched
################################################################################
# Download RPMs
@ -201,7 +201,7 @@ download-rpms:
# Generate downloading + unpacking of sources.
define Download
# Allow override
$(1)_DIRNAME ?= $(basename $(basename $(notdir $($(1)))))
$(1)_DIRNAME ?= $(basename $(basename $(notdir $($(1)_URL))))
$(1)_DIR = $(abspath $(SRCDIR)/$$($(1)_DIRNAME))
ifeq ($$($(1)_CMAKE_BASED),)
$(1)_CFG = $$($(1)_DIR)/configure
@ -212,7 +212,7 @@ define Download
$(1)_SRC_MARKER = $$($(1)_DIR)/CMakeLists.txt
$(1)_CONFIG = $$(CMAKE_CONFIG) $$($(1)_DIR)
endif
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)))
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)_URL))
$$($(1)_SRC_MARKER) : $$($(1)_FILE)
mkdir -p $$(SRCDIR)
@ -224,11 +224,11 @@ define Download
touch $$@
$$($(1)_FILE) :
wget -P $(DOWNLOAD) $$($(1))
wget -P $(DOWNLOAD) $$($(1)_URL)
endef
# Download and unpack all source packages
$(foreach dep,$(dependencies),$(eval $(call Download,$(call uppercase,$(dep)))))
$(foreach dep,$(DEPENDENCIES),$(eval $(call Download,$(dep))))
################################################################################
# Unpack RPMS
@ -250,7 +250,7 @@ RPM_FILE_LIST := $(sort $(foreach a, $(RPM_ARCHS), \
# Note. For building linux you should install rpm2cpio.
define unrpm
$(SYSROOT)/$(notdir $(1)).unpacked : $(1)
$$(rpms) : $(SYSROOT)/$(notdir $(1)).unpacked
$$(RPMS) : $(SYSROOT)/$(notdir $(1)).unpacked
endef
%.unpacked :
@ -277,7 +277,7 @@ $(foreach p,$(RPM_FILE_LIST),$(eval $(call unrpm,$(p))))
# have it anyway, but just to make sure...
# Patch libc.so and libpthread.so to force linking against libraries in sysroot
# and not the ones installed on the build machine.
$(libs) : $(rpms)
$(LIBS) : $(RPMS)
@echo Patching libc and pthreads
@(for f in `find $(SYSROOT) -name libc.so -o -name libpthread.so`; do \
(cat $$f | sed -e 's|/usr/lib64/||g' \
@ -293,10 +293,10 @@ $(libs) : $(rpms)
# Create links for ffi header files so that they become visible by default when using the
# devkit.
ifeq ($(ARCH), x86_64)
$(SYSROOT)/usr/include/ffi.h: $(rpms)
$(SYSROOT)/usr/include/ffi.h: $(RPMS)
cd $(@D) && rm -f $(@F) && ln -s ../lib/libffi-*/include/$(@F) .
$(SYSROOT)/usr/include/ffitarget.h: $(rpms)
$(SYSROOT)/usr/include/ffitarget.h: $(RPMS)
cd $(@D) && rm -f $(@F) && ln -s ../lib/libffi-*/include/$(@F) .
SYSROOT_LINKS += $(SYSROOT)/usr/include/ffi.h $(SYSROOT)/usr/include/ffitarget.h
@ -305,7 +305,7 @@ endif
################################################################################
# Define marker files for each source package to be compiled
$(foreach dep,$(dependencies),$(eval $(dep) = $(TARGETDIR)/$($(dep)_ver).done))
$(foreach dep,$(DEPENDENCIES),$(eval $(dep) = $(TARGETDIR)/$($(dep)_VER).done))
################################################################################
@ -345,48 +345,48 @@ TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
# CFLAG_<name> to most likely -m32.
define mk_bfd
$$(info Libs for $(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
$$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile \
: CFLAGS += $$(CFLAGS_$(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
$$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile \
: LIBDIRS = --libdir=$(TARGETDIR)/$(1)
bfdlib += $$(TARGETDIR)/$$(binutils_ver)-$(subst /,-,$(1)).done
bfdmakes += $$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile
BFDLIB += $$(TARGETDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1)).done
BFDMAKES += $$(BUILDDIR)/$$(BINUTILS_VER)-$(subst /,-,$(1))/Makefile
endef
# Create one set of bfds etc for each multilib arch
$(foreach l,$(LIBDIRS),$(eval $(call mk_bfd,$(l))))
# Only build these two libs.
$(bfdlib) : MAKECMD = all-libiberty all-bfd
$(bfdlib) : INSTALLCMD = install-libiberty install-bfd
$(BFDLIB) : MAKECMD = all-libiberty all-bfd
$(BFDLIB) : INSTALLCMD = install-libiberty install-bfd
# Building targets libbfd + libiberty. HOST==TARGET, i.e not
# for a cross env.
$(bfdmakes) : CONFIG = --target=$(TARGET) \
$(BFDMAKES) : CONFIG = --target=$(TARGET) \
--host=$(TARGET) --build=$(BUILD) \
--prefix=$(TARGETDIR) \
--with-sysroot=$(SYSROOT) \
$(LIBDIRS)
$(bfdmakes) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
$(BFDMAKES) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
################################################################################
$(gcc) \
$(binutils) \
$(gmp) \
$(mpfr) \
$(mpc) \
$(bfdmakes) \
$(ccache) : ENVS += $(TOOLS)
$(GCC) \
$(BINUTILS) \
$(GMP) \
$(MPFR) \
$(MPC) \
$(BFDMAKES) \
$(CCACHE) : ENVS += $(TOOLS)
# libdir to work around hateful bfd stuff installing into wrong dirs...
# ensure we have 64 bit bfd support in the HOST library. I.e our
# compiler on i686 will know 64 bit symbols, BUT later
# we build just the libs again for TARGET, then with whatever the arch
# wants.
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
$(BUILDDIR)/$(BINUTILS_VER)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
ifeq ($(filter $(ARCH), s390x riscv64 ppc64le), )
# gold compiles but cannot link properly on s390x @ gcc 13.2 and Fedora 41
@ -397,8 +397,8 @@ endif
# Makefile creation. Simply run configure in build dir.
# Setting CFLAGS to -O2 generates a much faster ld.
$(bfdmakes) \
$(BUILDDIR)/$(binutils_ver)/Makefile \
$(BFDMAKES) \
$(BUILDDIR)/$(BINUTILS_VER)/Makefile \
: $(BINUTILS_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@ -417,7 +417,7 @@ $(BUILDDIR)/$(binutils_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpfr_ver)/Makefile \
$(BUILDDIR)/$(MPFR_VER)/Makefile \
: $(MPFR_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@ -432,7 +432,7 @@ $(BUILDDIR)/$(mpfr_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(gmp_ver)/Makefile \
$(BUILDDIR)/$(GMP_VER)/Makefile \
: $(GMP_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@ -449,7 +449,7 @@ $(BUILDDIR)/$(gmp_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpc_ver)/Makefile \
$(BUILDDIR)/$(MPC_VER)/Makefile \
: $(MPC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@ -468,11 +468,11 @@ $(BUILDDIR)/$(mpc_ver)/Makefile \
# Only valid if glibc target -> linux
# proper destructor handling for c++
ifneq (,$(findstring linux,$(TARGET)))
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
$(BUILDDIR)/$(GCC_VER)/Makefile : CONFIG += --enable-__cxa_atexit
endif
ifeq ($(ARCH), armhfp)
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --with-float=hard
$(BUILDDIR)/$(GCC_VER)/Makefile : CONFIG += --with-float=hard
endif
ifneq ($(filter riscv64 ppc64le s390x, $(ARCH)), )
@ -487,7 +487,7 @@ endif
# skip native language.
# and link and assemble with the binutils we created
# earlier, so --with-gnu*
$(BUILDDIR)/$(gcc_ver)/Makefile \
$(BUILDDIR)/$(GCC_VER)/Makefile \
: $(GCC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
@ -509,17 +509,17 @@ $(BUILDDIR)/$(gcc_ver)/Makefile \
@echo 'done'
# need binutils for gcc
$(gcc) : $(binutils)
$(GCC) : $(BINUTILS)
# as of 4.3 or so need these for doing config
$(BUILDDIR)/$(gcc_ver)/Makefile : $(gmp) $(mpfr) $(mpc)
$(mpfr) : $(gmp)
$(mpc) : $(gmp) $(mpfr)
$(BUILDDIR)/$(GCC_VER)/Makefile : $(GMP) $(MPFR) $(MPC)
$(MPFR) : $(GMP)
$(MPC) : $(GMP) $(MPFR)
################################################################################
# Build gdb but only where host and target match
ifeq ($(HOST), $(TARGET))
$(BUILDDIR)/$(gdb_ver)/Makefile: $(GDB_CFG)
$(BUILDDIR)/$(GDB_VER)/Makefile: $(GDB_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
( \
@ -532,9 +532,9 @@ ifeq ($(HOST), $(TARGET))
) > $(@D)/log.config 2>&1
@echo 'done'
$(gdb): $(gcc)
$(GDB): $(GCC)
else
$(BUILDDIR)/$(gdb_ver)/Makefile:
$(BUILDDIR)/$(GDB_VER)/Makefile:
$(info Faking $@, not used when cross-compiling)
mkdir -p $(@D)
echo "install:" > $@
@ -543,7 +543,7 @@ endif
################################################################################
# very straightforward. just build a ccache. it is only for host.
$(BUILDDIR)/$(ccache_ver)/Makefile \
$(BUILDDIR)/$(CCACHE_VER)/Makefile \
: $(CCACHE_SRC_MARKER)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@ -554,12 +554,12 @@ $(BUILDDIR)/$(ccache_ver)/Makefile \
) > $(@D)/log.config 2>&1
@echo 'done'
gccpatch = $(TARGETDIR)/gcc-patched
GCC_PATCHED = $(TARGETDIR)/gcc-patched
################################################################################
# For some reason cpp is not created as a target-compiler
ifeq ($(HOST),$(TARGET))
$(gccpatch) : $(gcc) link_libs
$(GCC_PATCHED) : $(GCC) link_libs
@echo -n 'Creating compiler symlinks...'
@for f in cpp; do \
if [ ! -e $(PREFIX)/bin/$(TARGET)-$$f ]; \
@ -587,7 +587,7 @@ ifeq ($(HOST),$(TARGET))
done;)
@echo 'done'
else
$(gccpatch) :
$(GCC_PATCHED) :
@echo 'done'
endif
@ -615,7 +615,7 @@ $(PREFIX)/devkit.info:
echo '# This file describes to configure how to interpret the contents of this' >> $@
echo '# devkit' >> $@
echo '' >> $@
echo 'DEVKIT_NAME="$(gcc_ver) - $(LINUX_VERSION)"' >> $@
echo 'DEVKIT_NAME="$(GCC_VER) - $(LINUX_VERSION)"' >> $@
echo 'DEVKIT_TOOLCHAIN_PATH="$$DEVKIT_ROOT/bin"' >> $@
echo 'DEVKIT_SYSROOT="$$DEVKIT_ROOT/$(TARGET)/sysroot"' >> $@
echo 'DEVKIT_EXTRA_PATH="$$DEVKIT_ROOT/bin"' >> $@
@ -651,32 +651,32 @@ ifeq ($(TARGET), $(HOST))
@echo 'Creating missing $* soft link'
ln -s $(TARGET)-$* $@
missing-links := $(addprefix $(PREFIX)/bin/, \
addr2line ar as c++ c++filt dwp elfedit g++ gcc gcc-$(gcc_ver_only) gprof ld ld.bfd \
MISSING_LINKS := $(addprefix $(PREFIX)/bin/, \
addr2line ar as c++ c++filt dwp elfedit g++ gcc gcc-$(GCC_VER_ONLY) gprof ld ld.bfd \
ld.gold nm objcopy objdump ranlib readelf size strings strip)
endif
# Add link to work around "plugin needed to handle lto object" (JDK-8344272)
$(PREFIX)/lib/bfd-plugins/liblto_plugin.so: $(PREFIX)/libexec/gcc/$(TARGET)/$(gcc_ver_only)/liblto_plugin.so
$(PREFIX)/lib/bfd-plugins/liblto_plugin.so: $(PREFIX)/libexec/gcc/$(TARGET)/$(GCC_VER_ONLY)/liblto_plugin.so
@echo 'Creating missing $(@F) soft link'
@mkdir -p $(@D)
ln -s $$(realpath -s --relative-to=$(@D) $<) $@
missing-links += $(PREFIX)/lib/bfd-plugins/liblto_plugin.so
MISSING_LINKS += $(PREFIX)/lib/bfd-plugins/liblto_plugin.so
################################################################################
bfdlib : $(bfdlib)
binutils : $(binutils)
rpms : $(rpms)
libs : $(libs)
bfdlib : $(BFDLIB)
binutils : $(BINUTILS)
rpms : $(RPMS)
libs : $(LIBS)
sysroot : rpms libs
gcc : sysroot $(gcc) $(gccpatch)
gdb : $(gdb)
all : binutils gcc bfdlib $(PREFIX)/devkit.info $(missing-links) $(SYSROOT_LINKS) \
gcc : sysroot $(GCC) $(GCC_PATCHED)
gdb : $(GDB)
all : binutils gcc bfdlib $(PREFIX)/devkit.info $(MISSING_LINKS) $(SYSROOT_LINKS) \
$(THESE_MAKEFILES) gdb
# this is only built for host. so separate.
ccache : $(ccache)
ccache : $(CCACHE)
.PHONY : gcc all binutils bfdlib link_libs rpms libs sysroot

View File

@ -93,7 +93,7 @@ elif test "x$TARGET_PLATFORM" = xlinux_x64; then
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.x86_64.rpm | cpio -d -i
elif test "x$TARGET_PLATFORM" = xlinux_x86; then
M4_VERSION=1.4.13-5
wget http://yum.oracle.com/repo/OracleLinux/OL6/latest/i386/getPackage/m4-$M4_VERSION.el6.i686.rpm
wget https://yum.oracle.com/repo/OracleLinux/OL6/latest/i386/getPackage/m4-$M4_VERSION.el6.i686.rpm
cd $IMAGE_DIR
rpm2cpio $OUTPUT_ROOT/m4-$M4_VERSION.el6.i686.rpm | cpio -d -i
else

View File

@ -2568,10 +2568,6 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return FP_REG_mask();
}
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
Node* u = addp->fast_out(i);

View File

@ -383,13 +383,6 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::sp_opr;
}
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr; // Not needed on aarch64
}
bool FrameMap::validate_frame() {
return true;
}

View File

@ -228,8 +228,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@ -454,48 +453,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
return thread_addr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
// On aarch64, sites calling method handle intrinsics and lambda forms are treated
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
if (_cb != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -141,8 +141,6 @@
int _offset_unextended_sp; // for use in stack-chunk frames
};
void adjust_unextended_sp() NOT_DEBUG_RETURN;
// true means _sp value is correct and we can use it to get the sender's sp
// of the compiled frame, otherwise, _sp value may be invalid and we can use
// _fp to get the sender's sp if PreserveFramePointer is enabled.
@ -152,11 +150,6 @@
return (intptr_t*) addr_at(offset);
}
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:
// Constructors

View File

@ -116,8 +116,6 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
}
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {
_pc = original_pc;
@ -223,7 +221,6 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// assert(_pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {

View File

@ -35,8 +35,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORTS_NATIVE_CX8
#define SUPPORT_MONITOR_COUNT
// Aarch64 was not originally defined to be multi-copy-atomic, but now
// is. See: "Simplifying ARM Concurrency: Multicopy-atomic Axiomatic
// and Operational Models for ARMv8"

View File

@ -5630,38 +5630,6 @@ void MacroAssembler::tlab_allocate(Register obj,
bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}
void MacroAssembler::inc_held_monitor_count(Register tmp) {
Address dst(rthread, JavaThread::held_monitor_count_offset());
#ifdef ASSERT
ldr(tmp, dst);
increment(tmp);
str(tmp, dst);
Label ok;
tbz(tmp, 63, ok);
STOP("assert(held monitor count underflow)");
should_not_reach_here();
bind(ok);
#else
increment(dst);
#endif
}
void MacroAssembler::dec_held_monitor_count(Register tmp) {
Address dst(rthread, JavaThread::held_monitor_count_offset());
#ifdef ASSERT
ldr(tmp, dst);
decrement(tmp);
str(tmp, dst);
Label ok;
tbz(tmp, 63, ok);
STOP("assert(held monitor count underflow)");
should_not_reach_here();
bind(ok);
#else
decrement(dst);
#endif
}
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
if (UseTLAB && VerifyOops) {

View File

@ -983,9 +983,6 @@ public:
void push_cont_fastpath(Register java_thread = rthread);
void pop_cont_fastpath(Register java_thread = rthread);
void inc_held_monitor_count(Register tmp);
void dec_held_monitor_count(Register tmp);
// Round up to a power of two
void round_to(Register reg, int modulus);

View File

@ -90,7 +90,6 @@ void Relocation::pd_set_call_destination(address x) {
void trampoline_stub_Relocation::pd_fix_owner_after_move() {
NativeCall* call = nativeCall_at(owner());
assert(call->raw_destination() == owner(), "destination should be empty");
address trampoline = addr();
address dest = nativeCallTrampolineStub_at(trampoline)->destination();
if (!Assembler::reachable_from_branch_at(owner(), dest)) {

View File

@ -985,11 +985,8 @@ static void fill_continuation_entry(MacroAssembler* masm) {
__ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
__ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
__ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
__ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}
// on entry, sp points to the ContinuationEntry
@ -1005,50 +1002,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
#endif
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
__ cbzw(rscratch1, L_skip_vthread_code);
// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ cbz(rscratch1, L_skip_vthread_code);
// Save return value potentially containing the exception oop in callee-saved R19.
__ mov(r19, r0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
// Restore potential return value.
__ mov(r0, r19);
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
}
#ifdef ASSERT
else {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
__ cbzw(rscratch1, L_skip_vthread_code);
// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
}
#endif
__ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
__ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
__ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
__ add(rfp, sp, (int)ContinuationEntry::size());

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -222,10 +223,13 @@ void VM_Version::initialize() {
// Neoverse
// N1: 0xd0c
// N2: 0xd49
// N3: 0xd8e
// V1: 0xd40
// V2: 0xd4f
// V3: 0xd84
if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
model_is(0xd40) || model_is(0xd4f))) {
model_is(0xd40) || model_is(0xd4f) ||
model_is(0xd8e) || model_is(0xd84))) {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
@ -260,7 +264,9 @@ void VM_Version::initialize() {
// Neoverse
// V1: 0xd40
// V2: 0xd4f
if (_cpu == CPU_ARM && (model_is(0xd40) || model_is(0xd4f))) {
// V3: 0xd84
if (_cpu == CPU_ARM &&
(model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
}

View File

@ -1154,10 +1154,6 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return FP_REGP_mask();
}
bool maybe_far_call(const CallNode *n) {
return !MacroAssembler::_reachable_from_cache(n->as_Call()->entry_point());
}
@ -1248,23 +1244,6 @@ encode %{
__ set_inst_mark(mark);
%}
enc_class preserve_SP %{
// preserve mark
address mark = __ inst_mark();
DEBUG_ONLY(int off0 = __ offset());
// FP is preserved across all calls, even compiled calls.
// Use it to preserve SP in places where the callee might change the SP.
__ mov(Rmh_SP_save, SP);
DEBUG_ONLY(int off1 = __ offset());
assert(off1 - off0 == 4, "correct size prediction");
// restore mark
__ set_inst_mark(mark);
%}
enc_class restore_SP %{
__ mov(SP, Rmh_SP_save);
%}
enc_class Java_Dynamic_Call (method meth) %{
Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
assert(R8_ic_reg == Ricklass, "should be");
@ -8799,7 +8778,6 @@ instruct safePoint_poll(iRegP poll, R12RegI tmp, flagsReg icc) %{
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
match(CallStaticJava);
predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(CALL_COST);
@ -8808,20 +8786,6 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_pipe(simple_call);
%}
// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle( method meth ) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
// FP is saved by all callees (for interpreter stack correction).
// We use it here for a similar purpose, in {preserve,restore}_FP.
ins_cost(CALL_COST);
format %{ "CALL,static/MethodHandle ==> " %}
ins_encode( SetInstMark, preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
match(CallDynamicJava);

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -432,8 +432,7 @@ OptoRegPair c2::return_value(int ideal_reg) {
int MachCallStaticJavaNode::ret_addr_offset() {
bool far = (_method == nullptr) ? maybe_far_call(this) : !cache_reachable();
return ((far ? 3 : 1) + (_method_handle_invoke ? 1 : 0)) *
NativeInstruction::instruction_size;
return (far ? 3 : 1) * NativeInstruction::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {

View File

@ -174,11 +174,6 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::SP_opr;
}
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
assert(Rmh_SP_save == FP, "Fix register used for saving SP for MethodHandle calls");
return FP_opr;
}
bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0;

View File

@ -275,14 +275,6 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
}
static void restore_sp_for_method_handle(StubAssembler* sasm) {
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
__ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
__ cmp(Rtemp, 0);
__ mov(SP, Rmh_SP_save, ne);
}
OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) {
__ block_comment("generate_handle_exception");
@ -339,7 +331,6 @@ OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) {
break;
case StubId::c1_handle_exception_from_callee_id:
restore_live_registers_without_return(sasm); // must not jump immediately to handler
restore_sp_for_method_handle(sasm);
__ ret();
break;
default: ShouldNotReachHere();
@ -372,9 +363,6 @@ void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
// Jump to handler
__ verify_not_null_oop(Rexception_obj);
// JSR292 extension
restore_sp_for_method_handle(sasm);
__ jump(R0);
}

View File

@ -329,56 +329,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
return nullptr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
// same as on x86
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
nmethod* sender_nm = (_cb == nullptr) ? nullptr : _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
_unextended_sp = _fp;
}
else if (sender_nm->is_deopt_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
}
else if (sender_nm->is_method_handle_return(_pc)) {
_unextended_sp = _fp;
}
}
}
//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,20 +85,11 @@
// original sp.
intptr_t* _unextended_sp;
void adjust_unextended_sp();
intptr_t* ptr_at_addr(int offset) const {
return (intptr_t*) addr_at(offset);
}
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
verify_deopt_original_pc(nm, unextended_sp, true);
}
#endif
public:
// Constructors

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,8 +112,6 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
}
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {
_pc = original_pc;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -364,7 +364,6 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
// This does not seem to conflict with Rexception_pc
// In case of issues, R3 might be OK but adapters calling the runtime would have to save it
#define R5_mh R5 // MethodHandle register, used during the call setup
#define Rmh_SP_save FP // for C1
/*
* C++ Interpreter Register Defines

View File

@ -264,11 +264,6 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
__ raw_pop(FP, LR);
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
__ ldr(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
__ cmp(Rtemp, 0);
__ mov(SP, Rmh_SP_save, ne);
// R0 contains handler address
// Since this may be the deopt blob we must set R5 to look like we returned
// from the original pc that threw the exception

View File

@ -374,15 +374,6 @@ LIR_Opr FrameMap::stack_pointer() {
return SP_opr;
}
// JSR 292
// On PPC64, there is no need to save the SP, because neither
// method handle intrinsics, nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr;
}
bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -43,8 +43,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
#define SUPPORTS_NATIVE_CX8
#define SUPPORT_MONITOR_COUNT
// PPC64 is not specified as multi-copy-atomic
// So we must not #define CPU_MULTI_COPY_ATOMIC

View File

@ -402,7 +402,7 @@ void NativePostCallNop::make_deopt() {
bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
int32_t i2, i1;
assert(is_aligned(cb_offset, 4), "cb offset alignment does not match instruction alignment");
assert(!decode(i1, i2), "already patched");
assert(!decode(i1, i2) || NMethodRelocation, "already patched");
cb_offset = cb_offset >> 2;
if (((oopmap_slot & ppc_oopmap_slot_mask) != oopmap_slot) || ((cb_offset & ppc_cb_offset_mask) != cb_offset)) {

View File

@ -2473,10 +2473,6 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return RegMask();
}
%}
//----------ENCODING BLOCK-----------------------------------------------------
@ -3434,7 +3430,6 @@ encode %{
// Create the call node.
CallDynamicJavaDirectSchedNode *call = new CallDynamicJavaDirectSchedNode();
call->_method_handle_invoke = _method_handle_invoke;
call->_vtable_index = _vtable_index;
call->_method = _method;
call->_optimized_virtual = _optimized_virtual;

View File

@ -1639,7 +1639,6 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
assert_different_registers(reg_cont_obj, reg_flags);
Register zero = R8_ARG6;
Register tmp2 = R9_ARG7;
Register tmp3 = R10_ARG8;
DEBUG_ONLY(__ block_comment("fill {"));
#ifdef ASSERT
@ -1655,12 +1654,9 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
__ stw(zero, in_bytes(ContinuationEntry::pin_count_offset()), R1_SP);
__ ld_ptr(tmp2, JavaThread::cont_fastpath_offset(), R16_thread);
__ ld(tmp3, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
__ st_ptr(tmp2, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
__ std(tmp3, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
__ st_ptr(zero, JavaThread::cont_fastpath_offset(), R16_thread);
__ std(zero, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
DEBUG_ONLY(__ block_comment("} fill"));
}
@ -1681,7 +1677,6 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
static void continuation_enter_cleanup(MacroAssembler* masm) {
Register tmp1 = R8_ARG6;
Register tmp2 = R9_ARG7;
Register tmp3 = R10_ARG8;
#ifdef ASSERT
__ block_comment("clean {");
@ -1692,57 +1687,8 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
__ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
__ st_ptr(tmp1, JavaThread::cont_fastpath_offset(), R16_thread);
if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
__ cmpwi(CR0, R0, 0);
__ beq(CR0, L_skip_vthread_code);
// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
__ cmpdi(CR0, R0, 0);
__ beq(CR0, L_skip_vthread_code);
// Save return value potentially containing the exception oop
Register ex_oop = R15_esp; // nonvolatile register
__ mr(ex_oop, R3_RET);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
// Restore potental return value
__ mr(R3_RET, ex_oop);
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ li(tmp1, 0);
__ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
__ bind(L_skip_vthread_code);
}
#ifdef ASSERT
else {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
__ cmpwi(CR0, R0, 0);
__ beq(CR0, L_skip_vthread_code);
// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ li(tmp1, 0);
__ std(tmp1, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
__ bind(L_skip_vthread_code);
}
#endif
__ ld(tmp2, in_bytes(ContinuationEntry::parent_held_monitor_count_offset()), R1_SP);
__ ld_ptr(tmp3, ContinuationEntry::parent_offset(), R1_SP);
__ std(tmp2, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
__ st_ptr(tmp3, JavaThread::cont_entry_offset(), R16_thread);
__ ld_ptr(tmp2, ContinuationEntry::parent_offset(), R1_SP);
__ st_ptr(tmp2, JavaThread::cont_entry_offset(), R16_thread);
DEBUG_ONLY(__ block_comment("} clean"));
}

View File

@ -377,11 +377,6 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::sp_opr;
}
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr; // Not needed on riscv
}
bool FrameMap::validate_frame() {
return true;
}

View File

@ -1687,6 +1687,7 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
Register tmp4, Register tmp5, Register tmp6,
BasicType eltype)
{
assert(!UseRVV, "sanity");
assert_different_registers(ary, cnt, result, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, t0, t1);
const int elsize = arrays_hashcode_elsize(eltype);
@ -1759,29 +1760,143 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
BLOCK_COMMENT("} // arrays_hashcode");
}
void C2_MacroAssembler::arrays_hashcode_v(Register ary, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3,
BasicType eltype)
{
assert(UseRVV, "sanity");
assert(StubRoutines::riscv::arrays_hashcode_powers_of_31() != nullptr, "sanity");
assert_different_registers(ary, cnt, result, tmp1, tmp2, tmp3, t0, t1);
// The MaxVectorSize should have been set by detecting RVV max vector register
// size when check UseRVV (i.e. MaxVectorSize == VM_Version::_initial_vector_length).
// Let's use T_INT as all hashCode calculations eventually deal with ints.
const int lmul = 2;
const int stride = MaxVectorSize / sizeof(jint) * lmul;
const int elsize_bytes = arrays_hashcode_elsize(eltype);
const int elsize_shift = exact_log2(elsize_bytes);
switch (eltype) {
case T_BOOLEAN: BLOCK_COMMENT("arrays_hashcode_v(unsigned byte) {"); break;
case T_CHAR: BLOCK_COMMENT("arrays_hashcode_v(char) {"); break;
case T_BYTE: BLOCK_COMMENT("arrays_hashcode_v(byte) {"); break;
case T_SHORT: BLOCK_COMMENT("arrays_hashcode_v(short) {"); break;
case T_INT: BLOCK_COMMENT("arrays_hashcode_v(int) {"); break;
default:
ShouldNotReachHere();
}
const Register pow31_highest = tmp1;
const Register ary_end = tmp2;
const Register consumed = tmp3;
const VectorRegister v_sum = v2;
const VectorRegister v_src = v4;
const VectorRegister v_coeffs = v6;
const VectorRegister v_tmp = v8;
const address adr_pows31 = StubRoutines::riscv::arrays_hashcode_powers_of_31()
+ sizeof(jint);
Label VEC_LOOP, DONE, SCALAR_TAIL, SCALAR_TAIL_LOOP;
// NB: at this point (a) 'result' already has some value,
// (b) 'cnt' is not 0 or 1, see java code for details.
andi(t0, cnt, ~(stride - 1));
beqz(t0, SCALAR_TAIL);
la(t1, ExternalAddress(adr_pows31));
lw(pow31_highest, Address(t1, -1 * sizeof(jint)));
vsetvli(consumed, cnt, Assembler::e32, Assembler::m2);
vle32_v(v_coeffs, t1); // 31^^(stride - 1) ... 31^^0
vmv_v_x(v_sum, x0);
bind(VEC_LOOP);
arrays_hashcode_elload_v(v_src, v_tmp, ary, eltype);
vmul_vv(v_src, v_src, v_coeffs);
vmadd_vx(v_sum, pow31_highest, v_src);
mulw(result, result, pow31_highest);
shadd(ary, consumed, ary, t0, elsize_shift);
subw(cnt, cnt, consumed);
andi(t1, cnt, ~(stride - 1));
bnez(t1, VEC_LOOP);
vmv_s_x(v_tmp, x0);
vredsum_vs(v_sum, v_sum, v_tmp);
vmv_x_s(t0, v_sum);
addw(result, result, t0);
beqz(cnt, DONE);
bind(SCALAR_TAIL);
shadd(ary_end, cnt, ary, t0, elsize_shift);
bind(SCALAR_TAIL_LOOP);
arrays_hashcode_elload(t0, Address(ary), eltype);
slli(t1, result, 5); // optimize 31 * result
subw(result, t1, result); // with result<<5 - result
addw(result, result, t0);
addi(ary, ary, elsize_bytes);
bne(ary, ary_end, SCALAR_TAIL_LOOP);
bind(DONE);
BLOCK_COMMENT("} // arrays_hashcode_v");
}
int C2_MacroAssembler::arrays_hashcode_elsize(BasicType eltype) {
switch (eltype) {
case T_BOOLEAN: return sizeof(jboolean);
case T_BYTE: return sizeof(jbyte);
case T_SHORT: return sizeof(jshort);
case T_CHAR: return sizeof(jchar);
case T_INT: return sizeof(jint);
default:
ShouldNotReachHere();
return -1;
case T_BOOLEAN: return sizeof(jboolean);
case T_BYTE: return sizeof(jbyte);
case T_SHORT: return sizeof(jshort);
case T_CHAR: return sizeof(jchar);
case T_INT: return sizeof(jint);
default:
ShouldNotReachHere();
return -1;
}
}
void C2_MacroAssembler::arrays_hashcode_elload(Register dst, Address src, BasicType eltype) {
switch (eltype) {
// T_BOOLEAN used as surrogate for unsigned byte
case T_BOOLEAN: lbu(dst, src); break;
case T_BYTE: lb(dst, src); break;
case T_SHORT: lh(dst, src); break;
case T_CHAR: lhu(dst, src); break;
case T_INT: lw(dst, src); break;
default:
ShouldNotReachHere();
// T_BOOLEAN used as surrogate for unsigned byte
case T_BOOLEAN: lbu(dst, src); break;
case T_BYTE: lb(dst, src); break;
case T_SHORT: lh(dst, src); break;
case T_CHAR: lhu(dst, src); break;
case T_INT: lw(dst, src); break;
default:
ShouldNotReachHere();
}
}
void C2_MacroAssembler::arrays_hashcode_elload_v(VectorRegister vdst,
VectorRegister vtmp,
Register src,
BasicType eltype) {
assert_different_registers(vdst, vtmp);
switch (eltype) {
case T_BOOLEAN:
vle8_v(vtmp, src);
vzext_vf4(vdst, vtmp);
break;
case T_BYTE:
vle8_v(vtmp, src);
vsext_vf4(vdst, vtmp);
break;
case T_CHAR:
vle16_v(vtmp, src);
vzext_vf2(vdst, vtmp);
break;
case T_SHORT:
vle16_v(vtmp, src);
vsext_vf2(vdst, vtmp);
break;
case T_INT:
vle32_v(vdst, src);
break;
default:
ShouldNotReachHere();
}
}

View File

@ -92,11 +92,15 @@
Register tmp3, Register tmp4,
Register tmp5, Register tmp6,
BasicType eltype);
// helper function for arrays_hashcode
int arrays_hashcode_elsize(BasicType eltype);
void arrays_hashcode_elload(Register dst, Address src, BasicType eltype);
void arrays_hashcode_v(Register ary, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3,
BasicType eltype);
void arrays_hashcode_elload_v(VectorRegister vdst, VectorRegister vtmp,
Register src, BasicType eltype);
void string_equals(Register r1, Register r2,
Register result, Register cnt1);

View File

@ -217,8 +217,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@ -427,49 +426,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
return thread_addr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;
assert_cond(nm != nullptr);
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
// On riscv, sites calling method handle intrinsics and lambda forms are treated
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
if (_cb != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -179,17 +179,10 @@
int _offset_unextended_sp; // for use in stack-chunk frames
};
void adjust_unextended_sp() NOT_DEBUG_RETURN;
intptr_t* ptr_at_addr(int offset) const {
return (intptr_t*) addr_at(offset);
}
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:
// Constructors

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -114,8 +114,6 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
}
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {
_pc = original_pc;
@ -215,7 +213,6 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
// value.
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -44,8 +44,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORTS_NATIVE_CX8
#define SUPPORT_MONITOR_COUNT
#define SUPPORT_RESERVED_STACK_AREA
#define USE_POINTERS_TO_REGISTER_IMPL_ARRAY

View File

@ -225,36 +225,6 @@ void MacroAssembler::pop_cont_fastpath(Register java_thread) {
bind(done);
}
void MacroAssembler::inc_held_monitor_count(Register tmp) {
Address dst(xthread, JavaThread::held_monitor_count_offset());
ld(tmp, dst);
addi(tmp, tmp, 1);
sd(tmp, dst);
#ifdef ASSERT
Label ok;
test_bit(tmp, tmp, 63);
beqz(tmp, ok);
STOP("assert(held monitor count overflow)");
should_not_reach_here();
bind(ok);
#endif
}
void MacroAssembler::dec_held_monitor_count(Register tmp) {
Address dst(xthread, JavaThread::held_monitor_count_offset());
ld(tmp, dst);
subi(tmp, tmp, 1);
sd(tmp, dst);
#ifdef ASSERT
Label ok;
test_bit(tmp, tmp, 63);
beqz(tmp, ok);
STOP("assert(held monitor count underflow)");
should_not_reach_here();
bind(ok);
#endif
}
int MacroAssembler::align(int modulus, int extra_offset) {
CompressibleScope scope(this);
intptr_t before = offset();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -849,9 +849,6 @@ public:
void push_cont_fastpath(Register java_thread = xthread);
void pop_cont_fastpath(Register java_thread = xthread);
void inc_held_monitor_count(Register tmp);
void dec_held_monitor_count(Register tmp);
// if heap base register is used - reinit it with the correct value
void reinit_heapbase();

View File

@ -2152,10 +2152,6 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return FP_REG_mask();
}
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
assert_cond(addp != nullptr);
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
@ -10995,6 +10991,7 @@ instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI ba
iRegLNoSp tmp3, iRegLNoSp tmp4,
iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
%{
predicate(!UseRVV);
match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

View File

@ -4080,6 +4080,28 @@ instruct varray_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
ins_pipe(pipe_class_memory);
%}
// fast ArraysSupport.vectorizedHashCode
instruct varrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5,
vReg_V6 v6, vReg_V7 v7, vReg_V8 v8, vReg_V9 v9,
iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3,
rFlagsReg cr)
%{
predicate(UseRVV);
match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
effect(USE_KILL ary, USE_KILL cnt, USE basic_type,
TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v6, TEMP v7, TEMP v8, TEMP v9,
TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result // KILL all" %}
ins_encode %{
__ arrays_hashcode_v($ary$$Register, $cnt$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
(BasicType)$basic_type$$constant);
%}
ins_pipe(pipe_class_memory);
%}
instruct vstring_compareU_128b(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
iRegI_R10 result, vReg_V4 v4, vReg_V5 v5, vReg_V6 v6, vReg_V7 v7,
vReg_V8 v8, vReg_V9 v9, vReg_V10 v10, vReg_V11 v11,

View File

@ -885,11 +885,8 @@ static void fill_continuation_entry(MacroAssembler* masm) {
__ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
__ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ ld(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
__ sd(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
__ sd(zr, Address(xthread, JavaThread::held_monitor_count_offset()));
}
// on entry, sp points to the ContinuationEntry
@ -905,50 +902,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
__ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
__ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
__ beqz(t0, L_skip_vthread_code);
// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ ld(t0, Address(xthread, JavaThread::jni_monitor_count_offset()));
__ beqz(t0, L_skip_vthread_code);
// Save return value potentially containing the exception oop in callee-saved x9
__ mv(x9, x10);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
// Restore potential return value
__ mv(x10, x9);
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
}
#ifdef ASSERT
else {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
__ beqz(t0, L_skip_vthread_code);
// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));
__ bind(L_skip_vthread_code);
}
#endif
__ ld(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
__ sd(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
__ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
__ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
__ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);

View File

@ -73,6 +73,9 @@
do_stub(compiler, string_indexof_linear_ul) \
do_arch_entry(riscv, compiler, string_indexof_linear_ul, \
string_indexof_linear_ul, string_indexof_linear_ul) \
do_stub(compiler, arrays_hashcode_powers_of_31) \
do_arch_entry(riscv, compiler, arrays_hashcode_powers_of_31, \
arrays_hashcode_powers_of_31, arrays_hashcode_powers_of_31) \
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \

View File

@ -6624,6 +6624,24 @@ static const int64_t right_3_bits = right_n_bits(3);
return start;
}
address generate_arrays_hashcode_powers_of_31() {
assert(UseRVV, "sanity");
const int lmul = 2;
const int stride = MaxVectorSize / sizeof(jint) * lmul;
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "arrays_hashcode_powers_of_31");
address start = __ pc();
for (int i = stride; i >= 0; i--) {
jint power_of_31 = 1;
for (int j = i; j > 0; j--) {
power_of_31 = java_multiply(power_of_31, 31);
}
__ emit_int32(power_of_31);
}
return start;
}
#endif // COMPILER2
/**
@ -6818,6 +6836,10 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
}
if (UseVectorizedHashCodeIntrinsic && UseRVV) {
StubRoutines::riscv::_arrays_hashcode_powers_of_31 = generate_arrays_hashcode_powers_of_31();
}
if (UseSHA256Intrinsics) {
Sha2Generator sha2(_masm, this);
StubRoutines::_sha256_implCompress = sha2.generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id);

View File

@ -282,13 +282,6 @@ LIR_Opr FrameMap::stack_pointer() {
return Z_SP_opr;
}
// JSR 292
// On ZARCH_64, there is no need to save the SP, because neither
// method handle intrinsics nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr;
}
bool FrameMap::validate_frame() {
return true;
}

View File

@ -1980,11 +1980,6 @@ RegMask Matcher::modL_proj_mask() {
return _Z_RARG3_LONG_REG_mask;
}
// Copied from sparc.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return RegMask();
}
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
if (is_encode_and_store_pattern(n, m)) {

View File

@ -326,13 +326,6 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::rsp_opr;
}
// JSR 292
// On x86, there is no need to save the SP, because neither
// method handle intrinsics, nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr;
}
bool FrameMap::validate_frame() {
return true;
}

View File

@ -219,8 +219,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
if (nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@ -443,47 +442,6 @@ JavaThread** frame::saved_thread_address(const frame& f) {
return thread_addr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains_inclusive(original_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it) original_pc: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " name: %s", p2i(original_pc), p2i(unextended_sp), nm->name());
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
// On x86, sites calling method handle intrinsics and lambda forms are treated
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
if (_cb != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -138,17 +138,10 @@
int _offset_unextended_sp; // for use in stack-chunk frames
};
void adjust_unextended_sp() NOT_DEBUG_RETURN;
intptr_t* ptr_at_addr(int offset) const {
return (intptr_t*) addr_at(offset);
}
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:
// Constructors

View File

@ -111,8 +111,6 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
}
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {
_pc = original_pc;
@ -209,7 +207,6 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// assert(_pc != nullptr, "no pc?");
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,8 +34,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORTS_NATIVE_CX8
#define SUPPORT_MONITOR_COUNT
#define CPU_MULTI_COPY_ATOMIC
// The expected size in bytes of a cache line.

View File

@ -2431,14 +2431,6 @@ void MacroAssembler::pop_cont_fastpath() {
bind(L_done);
}
void MacroAssembler::inc_held_monitor_count() {
incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
}
void MacroAssembler::dec_held_monitor_count() {
decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
}
#ifdef ASSERT
void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
Label no_cont;
@ -5847,7 +5839,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
orl(value, rtmp);
}
cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
cmpptr(count, 8 << shift); // Short arrays (< 32 bytes) fill by element
jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
Label L_skip_align2;
@ -5910,13 +5902,36 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
BIND(L_check_fill_64_bytes_avx2);
}
// Fill 64-byte chunks
Label L_fill_64_bytes_loop;
vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
subptr(count, 16 << shift);
jcc(Assembler::less, L_check_fill_32_bytes);
align(16);
// align data for 64-byte chunks
Label L_fill_64_bytes_loop, L_align_64_bytes_loop;
if (EnableX86ECoreOpts) {
// align 'big' arrays to cache lines to minimize split_stores
cmpptr(count, 96 << shift);
jcc(Assembler::below, L_fill_64_bytes_loop);
// Find the bytes needed for alignment
movptr(rtmp, to);
andptr(rtmp, 0x1c);
jcc(Assembler::zero, L_fill_64_bytes_loop);
negptr(rtmp); // number of bytes to fill 32-rtmp. it filled by 2 mov by 32
addptr(rtmp, 32);
shrptr(rtmp, 2 - shift);// get number of elements from bytes
subptr(count, rtmp); // adjust count by number of elements
align(16);
BIND(L_align_64_bytes_loop);
movdl(Address(to, 0), xtmp);
addptr(to, 4);
subptr(rtmp, 1 << shift);
jcc(Assembler::greater, L_align_64_bytes_loop);
}
align(16);
BIND(L_fill_64_bytes_loop);
vmovdqu(Address(to, 0), xtmp);
vmovdqu(Address(to, 32), xtmp);
@ -5924,6 +5939,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
subptr(count, 16 << shift);
jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
align(16);
BIND(L_check_fill_32_bytes);
addptr(count, 8 << shift);
jccb(Assembler::less, L_check_fill_8_bytes);
@ -5968,6 +5984,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
//
// length is too short, just fill qwords
//
align(16);
BIND(L_fill_8_bytes_loop);
movq(Address(to, 0), xtmp);
addptr(to, 8);
@ -5976,14 +5993,22 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
}
}
// fill trailing 4 bytes
BIND(L_fill_4_bytes);
testl(count, 1<<shift);
Label L_fill_4_bytes_loop;
testl(count, 1 << shift);
jccb(Assembler::zero, L_fill_2_bytes);
align(16);
BIND(L_fill_4_bytes_loop);
movl(Address(to, 0), value);
addptr(to, 4);
BIND(L_fill_4_bytes);
subptr(count, 1 << shift);
jccb(Assembler::greaterEqual, L_fill_4_bytes_loop);
if (t == T_BYTE || t == T_SHORT) {
Label L_fill_byte;
addptr(to, 4);
BIND(L_fill_2_bytes);
// fill trailing 2 bytes
testl(count, 1<<(shift-1));

View File

@ -472,9 +472,6 @@ class MacroAssembler: public Assembler {
void push_cont_fastpath();
void pop_cont_fastpath();
void inc_held_monitor_count();
void dec_held_monitor_count();
DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
// Round up to a power of two

View File

@ -1352,11 +1352,8 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
__ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
__ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
__ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
__ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
__ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
__ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
}
//---------------------------- continuation_enter_cleanup ---------------------------
@ -1380,49 +1377,6 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
#endif
__ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
__ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
if (CheckJNICalls) {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);
// If the held monitor count is > 0 and this vthread is terminating then
// it failed to release a JNI monitor. So we issue the same log message
// that JavaThread::exit does.
__ cmpptr(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);
// rax may hold an exception oop, save it before the call
__ push(rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
__ pop(rax);
// For vthreads we have to explicitly zero the JNI monitor count of the carrier
// on termination. The held count is implicitly zeroed below when we restore from
// the parent held count (which has to be zero).
__ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
__ bind(L_skip_vthread_code);
}
#ifdef ASSERT
else {
// Check if this is a virtual thread continuation
Label L_skip_vthread_code;
__ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
__ jcc(Assembler::equal, L_skip_vthread_code);
// See comment just above. If not checking JNI calls the JNI count is only
// needed for assertion checking.
__ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
__ bind(L_skip_vthread_code);
}
#endif
__ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
__ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);
__ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
__ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
__ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));

View File

@ -1697,11 +1697,6 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask();
}
// Register for saving SP into on method handle invokes. Not used on x86_64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return NO_REG_mask();
}
%}
//----------ENCODING BLOCK-----------------------------------------------------

View File

@ -665,15 +665,13 @@ int CgroupSubsystem::active_processor_count() {
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
*/
jlong CgroupSubsystem::memory_limit_in_bytes() {
jlong CgroupSubsystem::memory_limit_in_bytes(julong upper_bound) {
CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
CachedMetric* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
return memory_limit->value();
}
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
log_trace(os, container)("total physical memory: " JULONG_FORMAT, phys_mem);
jlong mem_limit = contrl->controller()->read_memory_limit_in_bytes(phys_mem);
jlong mem_limit = contrl->controller()->read_memory_limit_in_bytes(upper_bound);
// Update cached metric to avoid re-reading container settings too often
memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
return mem_limit;
@ -841,21 +839,16 @@ jlong CgroupController::limit_from_str(char* limit_str) {
// CgroupSubsystem implementations
jlong CgroupSubsystem::memory_and_swap_limit_in_bytes() {
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return memory_controller()->controller()->memory_and_swap_limit_in_bytes(phys_mem, host_swap);
jlong CgroupSubsystem::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
}
jlong CgroupSubsystem::memory_and_swap_usage_in_bytes() {
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return memory_controller()->controller()->memory_and_swap_usage_in_bytes(phys_mem, host_swap);
jlong CgroupSubsystem::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound, upper_swap_bound);
}
jlong CgroupSubsystem::memory_soft_limit_in_bytes() {
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
return memory_controller()->controller()->memory_soft_limit_in_bytes(phys_mem);
jlong CgroupSubsystem::memory_soft_limit_in_bytes(julong upper_bound) {
return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound);
}
jlong CgroupSubsystem::memory_throttle_limit_in_bytes() {
@ -894,7 +887,6 @@ jlong CgroupSubsystem::cpu_usage_in_micros() {
return cpuacct_controller()->cpu_usage_in_micros();
}
void CgroupSubsystem::print_version_specific_info(outputStream* st) {
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
memory_controller()->controller()->print_version_specific_info(st, phys_mem);
void CgroupSubsystem::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
memory_controller()->controller()->print_version_specific_info(st, upper_mem_bound);
}

View File

@ -233,14 +233,14 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
public:
virtual jlong read_memory_limit_in_bytes(julong upper_bound) = 0;
virtual jlong memory_usage_in_bytes() = 0;
virtual jlong memory_and_swap_limit_in_bytes(julong host_mem, julong host_swap) = 0;
virtual jlong memory_and_swap_usage_in_bytes(julong host_mem, julong host_swap) = 0;
virtual jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
virtual jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
virtual jlong memory_soft_limit_in_bytes(julong upper_bound) = 0;
virtual jlong memory_throttle_limit_in_bytes() = 0;
virtual jlong memory_max_usage_in_bytes() = 0;
virtual jlong rss_usage_in_bytes() = 0;
virtual jlong cache_usage_in_bytes() = 0;
virtual void print_version_specific_info(outputStream* st, julong host_mem) = 0;
virtual void print_version_specific_info(outputStream* st, julong upper_mem_bound) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@ -251,7 +251,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
jlong memory_limit_in_bytes();
jlong memory_limit_in_bytes(julong upper_bound);
int active_processor_count();
virtual jlong pids_max() = 0;
@ -272,14 +272,14 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
jlong cpu_usage_in_micros();
jlong memory_usage_in_bytes();
jlong memory_and_swap_limit_in_bytes();
jlong memory_and_swap_usage_in_bytes();
jlong memory_soft_limit_in_bytes();
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
jlong memory_soft_limit_in_bytes(julong upper_bound);
jlong memory_throttle_limit_in_bytes();
jlong memory_max_usage_in_bytes();
jlong rss_usage_in_bytes();
jlong cache_usage_in_bytes();
void print_version_specific_info(outputStream* st);
void print_version_specific_info(outputStream* st, julong upper_mem_bound);
};
// Utility class for storing info retrieved from /proc/cgroups,

View File

@ -136,35 +136,35 @@ bool CgroupV1Controller::needs_hierarchy_adjustment() {
}
static inline
void verbose_log(julong read_mem_limit, julong host_mem) {
void verbose_log(julong read_mem_limit, julong upper_mem_bound) {
if (log_is_enabled(Debug, os, container)) {
jlong mem_limit = (jlong)read_mem_limit; // account for negative values
if (mem_limit < 0 || read_mem_limit >= host_mem) {
if (mem_limit < 0 || read_mem_limit >= upper_mem_bound) {
const char *reason;
if (mem_limit == OSCONTAINER_ERROR) {
reason = "failed";
} else if (mem_limit == -1) {
reason = "unlimited";
} else {
assert(read_mem_limit >= host_mem, "Expected read value exceeding host_mem");
assert(read_mem_limit >= upper_mem_bound, "Expected read value exceeding upper memory bound");
// Exceeding physical memory is treated as unlimited. This implementation
// caps it at host_mem since Cg v1 has no value to represent 'max'.
reason = "ignored";
}
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value " JLONG_FORMAT,
reason, mem_limit, host_mem);
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
reason, mem_limit, upper_mem_bound);
}
}
}
jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong phys_mem) {
jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
julong memlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", memlimit);
if (memlimit >= phys_mem) {
verbose_log(memlimit, phys_mem);
if (memlimit >= upper_bound) {
verbose_log(memlimit, upper_bound);
return (jlong)-1;
} else {
verbose_log(memlimit, phys_mem);
verbose_log(memlimit, upper_bound);
return (jlong)memlimit;
}
}
@ -181,10 +181,10 @@ jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong phys_mem) {
* * -1 if there isn't any limit in place (note: includes values which exceed a physical
* upper bound)
*/
jlong CgroupV1MemoryController::read_mem_swap(julong host_total_memsw) {
jlong CgroupV1MemoryController::read_mem_swap(julong upper_memsw_bound) {
julong memswlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", memswlimit);
if (memswlimit >= host_total_memsw) {
if (memswlimit >= upper_memsw_bound) {
log_trace(os, container)("Memory and Swap Limit is: Unlimited");
return (jlong)-1;
} else {
@ -192,8 +192,8 @@ jlong CgroupV1MemoryController::read_mem_swap(julong host_total_memsw) {
}
}
jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong host_mem, julong host_swap) {
jlong memory_swap = read_mem_swap(host_mem + host_swap);
jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_swap = read_mem_swap(upper_mem_bound + upper_swap_bound);
if (memory_swap == -1) {
return memory_swap;
}
@ -202,7 +202,7 @@ jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong host_mem,
// supported.
jlong swappiness = read_mem_swappiness();
if (swappiness == 0 || memory_swap == OSCONTAINER_ERROR) {
jlong memlimit = read_memory_limit_in_bytes(host_mem);
jlong memlimit = read_memory_limit_in_bytes(upper_mem_bound);
if (memory_swap == OSCONTAINER_ERROR) {
log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swap is not supported", memlimit);
} else {
@ -220,9 +220,9 @@ jlong memory_swap_usage_impl(CgroupController* ctrl) {
return (jlong)memory_swap_usage;
}
jlong CgroupV1MemoryController::memory_and_swap_usage_in_bytes(julong phys_mem, julong host_swap) {
jlong memory_sw_limit = memory_and_swap_limit_in_bytes(phys_mem, host_swap);
jlong memory_limit = read_memory_limit_in_bytes(phys_mem);
jlong CgroupV1MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_sw_limit = memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
if (memory_sw_limit > 0 && memory_limit > 0) {
jlong delta_swap = memory_sw_limit - memory_limit;
if (delta_swap > 0) {
@ -238,10 +238,10 @@ jlong CgroupV1MemoryController::read_mem_swappiness() {
return (jlong)swappiness;
}
jlong CgroupV1MemoryController::memory_soft_limit_in_bytes(julong phys_mem) {
jlong CgroupV1MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
julong memsoftlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", memsoftlimit);
if (memsoftlimit >= phys_mem) {
if (memsoftlimit >= upper_bound) {
log_trace(os, container)("Memory Soft Limit is: Unlimited");
return (jlong)-1;
} else {
@ -336,10 +336,10 @@ jlong CgroupV1MemoryController::kernel_memory_usage_in_bytes() {
return (jlong)kmem_usage;
}
jlong CgroupV1MemoryController::kernel_memory_limit_in_bytes(julong phys_mem) {
jlong CgroupV1MemoryController::kernel_memory_limit_in_bytes(julong upper_bound) {
julong kmem_limit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", kmem_limit);
if (kmem_limit >= phys_mem) {
if (kmem_limit >= upper_bound) {
return (jlong)-1;
}
return (jlong)kmem_limit;
@ -351,9 +351,9 @@ jlong CgroupV1MemoryController::kernel_memory_max_usage_in_bytes() {
return (jlong)kmem_max_usage;
}
void CgroupV1MemoryController::print_version_specific_info(outputStream* st, julong phys_mem) {
void CgroupV1MemoryController::print_version_specific_info(outputStream* st, julong mem_bound) {
jlong kmem_usage = kernel_memory_usage_in_bytes();
jlong kmem_limit = kernel_memory_limit_in_bytes(phys_mem);
jlong kmem_limit = kernel_memory_limit_in_bytes(mem_bound);
jlong kmem_max_usage = kernel_memory_max_usage_in_bytes();
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");

View File

@ -79,17 +79,17 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
}
jlong read_memory_limit_in_bytes(julong upper_bound) override;
jlong memory_usage_in_bytes() override;
jlong memory_and_swap_limit_in_bytes(julong host_mem, julong host_swap) override;
jlong memory_and_swap_usage_in_bytes(julong host_mem, julong host_swap) override;
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_soft_limit_in_bytes(julong upper_bound) override;
jlong memory_throttle_limit_in_bytes() override;
jlong memory_max_usage_in_bytes() override;
jlong rss_usage_in_bytes() override;
jlong cache_usage_in_bytes() override;
jlong kernel_memory_usage_in_bytes();
jlong kernel_memory_limit_in_bytes(julong host_mem);
jlong kernel_memory_limit_in_bytes(julong upper_bound);
jlong kernel_memory_max_usage_in_bytes();
void print_version_specific_info(outputStream* st, julong host_mem) override;
void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
bool needs_hierarchy_adjustment() override {
return reader()->needs_hierarchy_adjustment();
}
@ -101,7 +101,7 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
const char* cgroup_path() override { return reader()->cgroup_path(); }
private:
jlong read_mem_swappiness();
jlong read_mem_swap(julong host_total_memsw);
jlong read_mem_swap(julong upper_memsw_bound);
public:
CgroupV1MemoryController(const CgroupV1Controller& reader)

View File

@ -181,7 +181,7 @@ jlong CgroupV2MemoryController::memory_usage_in_bytes() {
return (jlong)memusage;
}
jlong CgroupV2MemoryController::memory_soft_limit_in_bytes(julong phys_mem) {
jlong CgroupV2MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
jlong mem_soft_limit;
CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", mem_soft_limit);
return mem_soft_limit;
@ -224,19 +224,19 @@ jlong CgroupV2MemoryController::cache_usage_in_bytes() {
// respectively. In order to properly report a cgroup v1 like
// compound value we need to sum the two values. Setting a swap limit
// without also setting a memory limit is not allowed.
jlong CgroupV2MemoryController::memory_and_swap_limit_in_bytes(julong phys_mem,
julong host_swap /* unused in cg v2 */) {
jlong CgroupV2MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound,
julong upper_swap_bound /* unused in cg v2 */) {
jlong swap_limit;
bool is_ok = reader()->read_number_handle_max("/memory.swap.max", &swap_limit);
if (!is_ok) {
// Some container tests rely on this trace logging to happen.
log_trace(os, container)("Swap Limit failed: %d", OSCONTAINER_ERROR);
// swap disabled at kernel level, treat it as no swap
return read_memory_limit_in_bytes(phys_mem);
return read_memory_limit_in_bytes(upper_mem_bound);
}
log_trace(os, container)("Swap Limit is: " JLONG_FORMAT, swap_limit);
if (swap_limit >= 0) {
jlong memory_limit = read_memory_limit_in_bytes(phys_mem);
jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
assert(memory_limit >= 0, "swap limit without memory limit?");
return memory_limit + swap_limit;
}
@ -252,7 +252,7 @@ jlong memory_swap_current_value(CgroupV2Controller* ctrl) {
return (jlong)swap_current;
}
jlong CgroupV2MemoryController::memory_and_swap_usage_in_bytes(julong host_mem, julong host_swap) {
jlong CgroupV2MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_usage = memory_usage_in_bytes();
if (memory_usage >= 0) {
jlong swap_current = memory_swap_current_value(reader());
@ -276,7 +276,7 @@ jlong memory_limit_value(CgroupV2Controller* ctrl) {
* memory limit in bytes or
* -1 for unlimited, OSCONTAINER_ERROR for an error
*/
jlong CgroupV2MemoryController::read_memory_limit_in_bytes(julong phys_mem) {
jlong CgroupV2MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
jlong limit = memory_limit_value(reader());
if (log_is_enabled(Trace, os, container)) {
if (limit == -1) {
@ -287,18 +287,18 @@ jlong CgroupV2MemoryController::read_memory_limit_in_bytes(julong phys_mem) {
}
if (log_is_enabled(Debug, os, container)) {
julong read_limit = (julong)limit; // avoid signed/unsigned compare
if (limit < 0 || read_limit >= phys_mem) {
if (limit < 0 || read_limit >= upper_bound) {
const char* reason;
if (limit == -1) {
reason = "unlimited";
} else if (limit == OSCONTAINER_ERROR) {
reason = "failed";
} else {
assert(read_limit >= phys_mem, "Expected mem limit to exceed host memory");
assert(read_limit >= upper_bound, "Expected mem limit to exceed upper memory bound");
reason = "ignored";
}
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value " JLONG_FORMAT,
reason, limit, phys_mem);
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
reason, limit, upper_bound);
}
}
return limit;
@ -327,7 +327,7 @@ bool CgroupV2Controller::needs_hierarchy_adjustment() {
return strcmp(_cgroup_path, "/") != 0;
}
void CgroupV2MemoryController::print_version_specific_info(outputStream* st, julong phys_mem) {
void CgroupV2MemoryController::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
jlong swap_current = memory_swap_current_value(reader());
jlong swap_limit = memory_swap_limit_value(reader());

View File

@ -115,15 +115,15 @@ class CgroupV2MemoryController final: public CgroupMemoryController {
}
jlong read_memory_limit_in_bytes(julong upper_bound) override;
jlong memory_and_swap_limit_in_bytes(julong host_mem, julong host_swp) override;
jlong memory_and_swap_usage_in_bytes(julong host_mem, julong host_swp) override;
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_soft_limit_in_bytes(julong upper_bound) override;
jlong memory_throttle_limit_in_bytes() override;
jlong memory_usage_in_bytes() override;
jlong memory_max_usage_in_bytes() override;
jlong rss_usage_in_bytes() override;
jlong cache_usage_in_bytes() override;
void print_version_specific_info(outputStream* st, julong host_mem) override;
void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
bool is_read_only() override {
return reader()->is_read_only();
}

View File

@ -84,8 +84,8 @@ void OSContainer::init() {
// We can be in one of two cases:
// 1.) On a physical Linux system without any limit
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
any_mem_cpu_limit_present = cgroup_subsystem->memory_limit_in_bytes() > 0 ||
os::Linux::active_processor_count() != cgroup_subsystem->active_processor_count();
any_mem_cpu_limit_present = memory_limit_in_bytes() > 0 ||
os::Linux::active_processor_count() != active_processor_count();
if (any_mem_cpu_limit_present) {
reason = " because either a cpu or a memory limit is present";
} else {
@ -103,24 +103,47 @@ const char * OSContainer::container_type() {
return cgroup_subsystem->container_type();
}
bool OSContainer::available_memory_in_container(julong& value) {
jlong mem_limit = memory_limit_in_bytes();
jlong mem_usage = memory_usage_in_bytes();
if (mem_limit > 0 && mem_usage <= 0) {
log_debug(os, container)("container memory usage failed: " JLONG_FORMAT, mem_usage);
}
if (mem_limit <= 0 || mem_usage <= 0) {
return false;
}
value = mem_limit > mem_usage ? static_cast<julong>(mem_limit - mem_usage) : 0;
return true;
}
jlong OSContainer::memory_limit_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_limit_in_bytes();
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
return cgroup_subsystem->memory_limit_in_bytes(phys_mem);
}
jlong OSContainer::memory_and_swap_limit_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_and_swap_limit_in_bytes();
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap);
}
jlong OSContainer::memory_and_swap_usage_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_and_swap_usage_in_bytes();
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap);
}
jlong OSContainer::memory_soft_limit_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_soft_limit_in_bytes();
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem);
}
jlong OSContainer::memory_throttle_limit_in_bytes() {
@ -150,7 +173,8 @@ jlong OSContainer::cache_usage_in_bytes() {
void OSContainer::print_version_specific_info(outputStream* st) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
cgroup_subsystem->print_version_specific_info(st);
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
cgroup_subsystem->print_version_specific_info(st, phys_mem);
}
char * OSContainer::cpu_cpuset_cpus() {

View File

@ -50,6 +50,7 @@ class OSContainer: AllStatic {
static inline bool is_containerized();
static const char * container_type();
static bool available_memory_in_container(julong& value);
static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();
static jlong memory_and_swap_usage_in_bytes();

View File

@ -213,33 +213,20 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
julong os::Linux::available_memory_in_container() {
julong avail_mem = static_cast<julong>(-1L);
if (OSContainer::is_containerized()) {
jlong mem_limit = OSContainer::memory_limit_in_bytes();
jlong mem_usage;
if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
}
if (mem_limit > 0 && mem_usage > 0) {
avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
}
}
return avail_mem;
}
bool os::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
julong avail_mem = available_memory_in_container();
if (avail_mem != static_cast<julong>(-1L)) {
julong avail_mem = 0;
if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(avail_mem)) {
log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
value = static_cast<physical_memory_size_type>(avail_mem);
return true;
}
return Linux::available_memory(value);
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
julong avail_mem = static_cast<julong>(-1L);
FILE *fp = os::fopen("/proc/meminfo", "r");
if (fp != nullptr) {
char buf[80];
@ -264,24 +251,25 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
julong free_mem = 0;
if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(free_mem)) {
log_trace(os)("free container memory: " JULONG_FORMAT, free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
return true;
}
return Linux::free_memory(value);
}
bool os::Linux::free_memory(physical_memory_size_type& value) {
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
julong free_mem = available_memory_in_container();
if (free_mem != static_cast<julong>(-1L)) {
log_trace(os)("free container memory: " JULONG_FORMAT, free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
return true;
}
int ret = sysinfo(&si);
if (ret != 0) {
return false;
}
free_mem = (julong)si.freeram * si.mem_unit;
julong free_mem = (julong)si.freeram * si.mem_unit;
log_trace(os)("free memory: " JULONG_FORMAT, free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
return true;

View File

@ -67,6 +67,14 @@ ATTRIBUTE_NO_ASAN static bool _SafeFetchXX_internal(const T *adr, T* result) {
T n = 0;
#ifdef AIX
// AIX allows reading from nullptr without signalling
if (adr == nullptr) {
*result = 0;
return false;
}
#endif
// Set up a jump buffer. Anchor its pointer in TLS. Then read from the unsafe address.
// If that address was invalid, we fault, and in the signal handler we will jump back
// to the jump point.

View File

@ -621,9 +621,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
address deopt = nm->deopt_handler_begin();
assert(deopt != nullptr, "");
frame fr = os::fetch_frame_from_context(uc);

View File

@ -2797,9 +2797,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
address deopt = nm->deopt_handler_begin();
assert(nm->insts_contains_inclusive(pc), "");
nm->set_original_pc(&fr, pc);
// Set pc to handler

View File

@ -759,20 +759,15 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
if (_pipeline->_maxcycleused <= 32) {
fprintf(fp_hpp, "protected:\n");
fprintf(fp_hpp, " %s _mask;\n\n", _pipeline->_maxcycleused <= 32 ? "uint" : "uint64_t" );
fprintf(fp_hpp, " uint32_t _mask;\n\n");
fprintf(fp_hpp, "public:\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask() : _mask(0) {}\n\n");
if (_pipeline->_maxcycleused <= 32)
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint mask) : _mask(mask) {}\n\n");
else {
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint mask1, uint mask2) : _mask((((uint64_t)mask1) << 32) | mask2) {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint64_t mask) : _mask(mask) {}\n\n");
}
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(uint32_t mask) : _mask(mask) {}\n\n");
fprintf(fp_hpp, " bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
fprintf(fp_hpp, " return ((_mask & in2._mask) != 0);\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator<<=(int n) {\n");
fprintf(fp_hpp, " _mask <<= n;\n");
fprintf(fp_hpp, " _mask <<= (n < 32) ? n : 31;\n");
fprintf(fp_hpp, " return *this;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " void Or(const Pipeline_Use_Cycle_Mask &in2) {\n");
@ -785,7 +780,7 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, "protected:\n");
uint masklen = (_pipeline->_maxcycleused + 31) >> 5;
uint l;
fprintf(fp_hpp, " uint ");
fprintf(fp_hpp, " uint32_t ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "_mask%d%s", l, l < masklen ? ", " : ";\n\n");
fprintf(fp_hpp, "public:\n");
@ -794,7 +789,7 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, "_mask%d(0)%s", l, l < masklen ? ", " : " {}\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask(");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "uint mask%d%s", l, l < masklen ? ", " : ") : ");
fprintf(fp_hpp, "uint32_t mask%d%s", l, l < masklen ? ", " : ") : ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "_mask%d(mask%d)%s", l, l, l < masklen ? ", " : " {}\n\n");
@ -805,10 +800,10 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, " return out;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
fprintf(fp_hpp, " return (");
fprintf(fp_hpp, " return ");
for (l = 1; l <= masklen; l++)
fprintf(fp_hpp, "((_mask%d & in2._mask%d) != 0)%s", l, l, l < masklen ? " || " : "");
fprintf(fp_hpp, ") ? true : false;\n");
fprintf(fp_hpp, ";\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " Pipeline_Use_Cycle_Mask& operator<<=(int n) {\n");
fprintf(fp_hpp, " if (n >= 32)\n");
@ -819,10 +814,10 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, " } while ((n -= 32) >= 32);\n\n");
fprintf(fp_hpp, " if (n > 0) {\n");
fprintf(fp_hpp, " uint m = 32 - n;\n");
fprintf(fp_hpp, " uint mask = (1 << n) - 1;\n");
fprintf(fp_hpp, " uint temp%d = mask & (_mask%d >> m); _mask%d <<= n;\n", 2, 1, 1);
fprintf(fp_hpp, " uint32_t mask = (1 << n) - 1;\n");
fprintf(fp_hpp, " uint32_t temp%d = mask & (_mask%d >> m); _mask%d <<= n;\n", 2, 1, 1);
for (l = 2; l < masklen; l++) {
fprintf(fp_hpp, " uint temp%d = mask & (_mask%d >> m); _mask%d <<= n; _mask%d |= temp%d;\n", l+1, l, l, l, l);
fprintf(fp_hpp, " uint32_t temp%d = mask & (_mask%d >> m); _mask%d <<= n; _mask%d |= temp%d;\n", l+1, l, l, l, l);
}
fprintf(fp_hpp, " _mask%d <<= n; _mask%d |= temp%d;\n", masklen, masklen, masklen);
fprintf(fp_hpp, " }\n");
@ -872,8 +867,7 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " void step(uint cycles) {\n");
fprintf(fp_hpp, " _used = 0;\n");
fprintf(fp_hpp, " uint max_shift = 8 * sizeof(_mask) - 1;\n");
fprintf(fp_hpp, " _mask <<= (cycles < max_shift) ? cycles : max_shift;\n");
fprintf(fp_hpp, " _mask <<= cycles;\n");
fprintf(fp_hpp, " }\n\n");
fprintf(fp_hpp, " friend class Pipeline_Use;\n");
fprintf(fp_hpp, "};\n\n");

View File

@ -57,7 +57,6 @@ public:
OSR_Entry,
Exceptions, // Offset where exception handler lives
Deopt, // Offset where deopt handler lives
DeoptMH, // Offset where MethodHandle deopt handler lives
UnwindHandler, // Offset to default unwind handler
max_Entries };
@ -77,7 +76,6 @@ public:
_values[OSR_Entry ] = 0;
_values[Exceptions ] = -1;
_values[Deopt ] = -1;
_values[DeoptMH ] = -1;
_values[UnwindHandler ] = -1;
}

View File

@ -310,14 +310,6 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
CHECK_BAILOUT();
// Emit the MethodHandle deopt handler code (if required).
if (has_method_handle_invokes()) {
// We can use the same code as for the normal deopt handler, we
// just need a different entry point address.
code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
CHECK_BAILOUT();
}
// Emit the handler to remove the activation from the stack and
// dispatch to the caller.
offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
@ -574,7 +566,6 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _has_unsafe_access(false)
, _has_irreducible_loops(false)
, _would_profile(false)
, _has_method_handle_invokes(false)
, _has_reserved_stack_access(method->has_reserved_stack_access())
, _has_monitors(method->is_synchronized() || method->has_monitor_bytecodes())
, _has_scoped_access(method->is_scoped())

View File

@ -79,7 +79,6 @@ class Compilation: public StackObj {
bool _has_unsafe_access;
bool _has_irreducible_loops;
bool _would_profile;
bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
bool _has_reserved_stack_access;
bool _has_monitors; // Fastpath monitors detection for Continuations
bool _has_scoped_access; // For shared scope closure
@ -180,10 +179,6 @@ class Compilation: public StackObj {
// Statistics gathering
void notice_inlined_method(ciMethod* method);
// JSR 292
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }

View File

@ -155,9 +155,6 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer();
// JSR 292
static LIR_Opr method_handle_invoke_SP_save_opr();
static BasicTypeArray* signature_type_array_for(const ciMethod* method);
// for outgoing calls, these also update the reserved area to

View File

@ -190,7 +190,6 @@ CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, boo
, _exception_handlers(exception_handlers)
, _oop_map(nullptr)
, _stack(stack)
, _is_method_handle_invoke(false)
, _deoptimize_on_exception(deoptimize_on_exception)
, _force_reexecute(false) {
assert(_stack != nullptr, "must be non null");
@ -203,7 +202,6 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
, _exception_handlers(nullptr)
, _oop_map(nullptr)
, _stack(stack == nullptr ? info->_stack : stack)
, _is_method_handle_invoke(info->_is_method_handle_invoke)
, _deoptimize_on_exception(info->_deoptimize_on_exception)
, _force_reexecute(info->_force_reexecute) {
@ -218,7 +216,7 @@ void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_
// record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
_scope_debug_info->record_debug_info(recorder, pc_offset, reexecute, _is_method_handle_invoke);
_scope_debug_info->record_debug_info(recorder, pc_offset, reexecute);
recorder->end_safepoint(pc_offset);
}

View File

@ -234,7 +234,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
//Whether we should reexecute this bytecode for deopt
bool should_reexecute();
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool reexecute, bool is_method_handle_invoke = false) {
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool reexecute) {
if (caller() != nullptr) {
// Order is significant: Must record caller first.
caller()->record_debug_info(recorder, pc_offset, false/*reexecute*/);
@ -248,7 +248,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
bool has_ea_local_in_scope = false;
bool arg_escape = false;
recorder->describe_scope(pc_offset, methodHandle(), scope()->method(), bci(),
reexecute, rethrow_exception, is_method_handle_invoke, return_oop,
reexecute, rethrow_exception, return_oop,
has_ea_local_in_scope, arg_escape, locvals, expvals, monvals);
}
};
@ -262,7 +262,6 @@ class CodeEmitInfo: public CompilationResourceObj {
XHandlers* _exception_handlers;
OopMap* _oop_map;
ValueStack* _stack; // used by deoptimization (contains also monitors
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
bool _deoptimize_on_exception;
bool _force_reexecute; // force the reexecute flag on, used for patching stub
@ -288,9 +287,6 @@ class CodeEmitInfo: public CompilationResourceObj {
void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
bool force_reexecute() const { return _force_reexecute; }
void set_force_reexecute() { _force_reexecute = true; }

View File

@ -709,11 +709,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr &&
opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);

View File

@ -1176,7 +1176,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
private:
ciMethod* _method;
LIR_Opr _receiver;
LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1186,7 +1185,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
: LIR_OpCall(code, addr, result, arguments, info)
, _method(method)
, _receiver(receiver)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1195,7 +1193,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _method(method)
, _receiver(receiver)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_Opr receiver() const { return _receiver; }

View File

@ -478,12 +478,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
fatal("unexpected op code: %s", op->name());
break;
}
// JSR 292
// Record if this method has MethodHandle invokes.
if (op->is_method_handle_invoke()) {
compilation()->set_has_method_handle_invokes(true);
}
}

View File

@ -2712,19 +2712,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// emit invoke code
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
// JSR 292
// Preserve the SP over MethodHandle call sites, if needed.
ciMethod* target = x->target();
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form());
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
}
}
switch (x->code()) {
case Bytecodes::_invokestatic:
__ call_static(target, result_register,
@ -2757,13 +2745,6 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}
// JSR 292
// Restore the SP after MethodHandle call sites, if needed.
if (is_method_handle_invoke
&& FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}
if (result_register->is_valid()) {
LIR_Opr result = rlock_result(x);
__ move(result_register, result);

View File

@ -541,9 +541,6 @@ extern void vm_exit(int code);
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
// Reset method handle flag.
current->set_is_method_handle_return(false);
Handle exception(current, ex);
// This function is called when we are about to throw an exception. Therefore,
@ -622,8 +619,6 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
if (guard_pages_enabled) {
address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
if (fast_continuation != nullptr) {
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(nm->is_method_handle_return(pc));
return fast_continuation;
}
}
@ -660,8 +655,6 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
}
current->set_vm_result_oop(exception());
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(nm->is_method_handle_return(pc));
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;

View File

@ -33,6 +33,8 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
@ -348,6 +350,12 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
case MetaspaceObj::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
case MetaspaceObj::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
@ -389,6 +397,18 @@ void AOTMapLogger::log_const_method(ConstMethod* cm, address requested_addr, con
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, cm->method()->external_name());
}
void AOTMapLogger::log_method_counters(MethodCounters* mc, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, mc->method()->external_name());
}
void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);

View File

@ -98,6 +98,10 @@ class AOTMapLogger : AllStatic {
static void log_constant_pool_cache(ConstantPoolCache* cpc, address requested_addr,
const char* type_name, int bytes, Thread* current);
static void log_const_method(ConstMethod* cm, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_counters(MethodCounters* mc, address requested_addr, const char* type_name, int bytes,
Thread* current);
static void log_method_data(MethodData* md, address requested_addr, const char* type_name, int bytes,
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);

View File

@ -37,7 +37,6 @@
// which ensures that for each oop, at most one ciObject is created.
// This invariant allows efficient implementation of ciObject.
class ciObjectFactory : public ArenaObj {
friend class VMStructs;
friend class ciEnv;
private:

View File

@ -4678,11 +4678,15 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
return signature + 1;
case JVM_SIGNATURE_CLASS: {
if (_major_version < JAVA_1_5_VERSION) {
signature++;
length--;
// Skip over the class name if one is there
const char* const p = skip_over_field_name(signature + 1, true, --length);
const char* const p = skip_over_field_name(signature, true, length);
assert(p == nullptr || p > signature, "must parse one character at least");
// The next character better be a semicolon
if (p && (p - signature) > 1 && p[0] == JVM_SIGNATURE_ENDCLASS) {
if (p != nullptr && // Parse of field name succeeded.
p - signature < static_cast<int>(length) && // There is at least one character left to parse.
p[0] == JVM_SIGNATURE_ENDCLASS) {
return p + 1;
}
}

View File

@ -254,7 +254,6 @@ template <
bool (*EQUALS)(V value, K key, int len)
>
class CompactHashtable : public SimpleCompactHashtable {
friend class VMStructs;
V decode(u4 encoded_value) const {
return DECODE(_base_address, encoded_value);

View File

@ -174,7 +174,6 @@ InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(
// No longer holding SharedDictionary_lock
// No need to lock, as <ik> can be held only by a single thread.
loader_data->add_class(ik);
// Get the package entry.
PackageEntry* pkg_entry = CDSProtectionDomain::get_package_entry_from_class(ik, class_loader);

View File

@ -23,23 +23,24 @@
*/
#include "code/codeBehaviours.hpp"
#include "code/nmethod.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = nullptr;
bool DefaultICProtectionBehaviour::lock(nmethod* method) {
if (is_safe(method)) {
bool DefaultICProtectionBehaviour::lock(nmethod* nm) {
if (is_safe(nm)) {
return false;
}
CompiledIC_lock->lock_without_safepoint_check();
return true;
}
void DefaultICProtectionBehaviour::unlock(nmethod* method) {
void DefaultICProtectionBehaviour::unlock(nmethod* nm) {
CompiledIC_lock->unlock();
}
bool DefaultICProtectionBehaviour::is_safe(nmethod* method) {
return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self();
bool DefaultICProtectionBehaviour::is_safe(nmethod* nm) {
return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self() || (NMethodState_lock->owned_by_self() && nm->is_not_installed());
}

View File

@ -33,18 +33,18 @@ class CompiledICProtectionBehaviour {
static CompiledICProtectionBehaviour* _current;
public:
virtual bool lock(nmethod* method) = 0;
virtual void unlock(nmethod* method) = 0;
virtual bool is_safe(nmethod* method) = 0;
virtual bool lock(nmethod* nm) = 0;
virtual void unlock(nmethod* nm) = 0;
virtual bool is_safe(nmethod* nm) = 0;
static CompiledICProtectionBehaviour* current() { return _current; }
static void set_current(CompiledICProtectionBehaviour* current) { _current = current; }
};
class DefaultICProtectionBehaviour: public CompiledICProtectionBehaviour, public CHeapObj<mtInternal> {
virtual bool lock(nmethod* method);
virtual void unlock(nmethod* method);
virtual bool is_safe(nmethod* method);
virtual bool lock(nmethod* nm);
virtual void unlock(nmethod* nm);
virtual bool is_safe(nmethod* nm);
};
#endif // SHARE_CODE_CODEBEHAVIOURS_HPP

View File

@ -325,7 +325,6 @@ public:
// RuntimeBlob: used for non-compiled method code (adapters, stubs, blobs)
class RuntimeBlob : public CodeBlob {
friend class VMStructs;
public:
// Creation
@ -634,7 +633,6 @@ class DeoptimizationBlob: public SingletonBlob {
#ifdef COMPILER2
class UncommonTrapBlob: public SingletonBlob {
friend class VMStructs;
private:
// Creation support
UncommonTrapBlob(
@ -658,7 +656,6 @@ class UncommonTrapBlob: public SingletonBlob {
// ExceptionBlob: used for exception unwinding in compiled code (currently only used by Compiler 2)
class ExceptionBlob: public SingletonBlob {
friend class VMStructs;
private:
// Creation support
ExceptionBlob(
@ -695,7 +692,6 @@ class ExceptionBlob: public SingletonBlob {
// SafepointBlob: handles illegal_instruction exceptions during a safepoint
class SafepointBlob: public SingletonBlob {
friend class VMStructs;
private:
// Creation support
SafepointBlob(

View File

@ -259,7 +259,7 @@ class CodeCache : AllStatic {
static bool heap_available(CodeBlobType code_blob_type);
// Returns the CodeBlobType for the given nmethod
static CodeBlobType get_code_blob_type(nmethod* nm) {
static CodeBlobType get_code_blob_type(const nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
}

View File

@ -55,8 +55,8 @@ CompiledICLocker::~CompiledICLocker() {
}
}
bool CompiledICLocker::is_safe(nmethod* method) {
return CompiledICProtectionBehaviour::current()->is_safe(method);
bool CompiledICLocker::is_safe(nmethod* nm) {
return CompiledICProtectionBehaviour::current()->is_safe(nm);
}
bool CompiledICLocker::is_safe(address code) {

View File

@ -50,7 +50,7 @@ class CompiledICLocker: public StackObj {
public:
CompiledICLocker(nmethod* method);
~CompiledICLocker();
static bool is_safe(nmethod* method);
static bool is_safe(nmethod* nm);
static bool is_safe(address code);
};

View File

@ -283,7 +283,6 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
int bci,
bool reexecute,
bool rethrow_exception,
bool is_method_handle_invoke,
bool return_oop,
bool has_ea_local_in_scope,
bool arg_escape,
@ -301,7 +300,6 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
// Record flags into pcDesc.
last_pd->set_should_reexecute(reexecute);
last_pd->set_rethrow_exception(rethrow_exception);
last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
last_pd->set_return_oop(return_oop);
last_pd->set_has_ea_local_in_scope(has_ea_local_in_scope);
last_pd->set_arg_escape(arg_escape);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -105,7 +105,6 @@ class DebugInformationRecorder: public ResourceObj {
int bci,
bool reexecute,
bool rethrow_exception = false,
bool is_method_handle_invoke = false,
bool return_oop = false,
bool has_ea_local_in_scope = false,
bool arg_escape = false,

View File

@ -42,7 +42,6 @@ class DepChange;
// finding nmethods which might need to be deoptimized.
//
class nmethodBucket: public CHeapObj<mtClass> {
friend class VMStructs;
private:
nmethod* _nmethod;
nmethodBucket* volatile _next;
@ -68,7 +67,6 @@ class nmethodBucket: public CHeapObj<mtClass> {
// and uint64_t integer recording the safepoint counter at the last cleanup.
//
class DependencyContext : public StackObj {
friend class VMStructs;
friend class TestDependencyContext;
private:
nmethodBucket* volatile* _dependency_context_addr;

View File

@ -465,14 +465,6 @@ static int adjust_pcs_size(int pcs_size) {
return nsize;
}
bool nmethod::is_method_handle_return(address return_pc) {
if (!has_method_handle_invokes()) return false;
PcDesc* pd = pc_desc_at(return_pc);
if (pd == nullptr)
return false;
return pd->is_method_handle_invoke();
}
// Returns a string version of the method state.
const char* nmethod::state() const {
int state = get_state();
@ -762,7 +754,7 @@ Method* nmethod::attached_method_before_pc(address pc) {
}
void nmethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
assert(SafepointSynchronize::is_at_safepoint() || (NMethodState_lock->owned_by_self() && is_not_installed()), "clearing of IC's only allowed at safepoint or when not installed");
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
@ -1154,7 +1146,8 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
#if INCLUDE_JVMCI
+ align_up(speculations_len , oopSize)
#endif
+ align_up(debug_info->data_size() , oopSize);
+ align_up(debug_info->data_size() , oopSize)
+ align_up(ImmutableDataReferencesCounterSize, oopSize);
// First, allocate space for immutable data in C heap.
address immutable_data = nullptr;
@ -1231,7 +1224,6 @@ void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
_state = not_installed;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_has_wide_vectors = 0;
_has_monitors = 0;
_has_scoped_access = 0;
@ -1310,7 +1302,6 @@ nmethod::nmethod(
// Native wrappers do not have deopt handlers. Make the values
// something that will never match a pc like the nmethod vtable entry
_deopt_handler_offset = 0;
_deopt_mh_handler_offset = 0;
_unwind_handler_offset = 0;
CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
@ -1381,10 +1372,266 @@ nmethod::nmethod(
}
}
nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm._header_size)
{
if (nm._oop_maps != nullptr) {
_oop_maps = nm._oop_maps->clone();
} else {
_oop_maps = nullptr;
}
_size = nm._size;
_relocation_size = nm._relocation_size;
_content_offset = nm._content_offset;
_code_offset = nm._code_offset;
_data_offset = nm._data_offset;
_frame_size = nm._frame_size;
S390_ONLY( _ctable_offset = nm._ctable_offset; )
_header_size = nm._header_size;
_frame_complete_offset = nm._frame_complete_offset;
_kind = nm._kind;
_caller_must_gc_arguments = nm._caller_must_gc_arguments;
#ifndef PRODUCT
_asm_remarks.share(nm._asm_remarks);
_dbg_strings.share(nm._dbg_strings);
#endif
// Allocate memory and copy mutable data to C heap
_mutable_data_size = nm._mutable_data_size;
if (_mutable_data_size > 0) {
_mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
}
memcpy(mutable_data_begin(), nm.mutable_data_begin(), nm.mutable_data_size());
} else {
_mutable_data = nullptr;
}
_deoptimization_generation = 0;
_gc_epoch = CodeCache::gc_epoch();
_method = nm._method;
_osr_link = nullptr;
// Increment number of references to immutable data to share it between nmethods
_immutable_data_size = nm._immutable_data_size;
if (_immutable_data_size > 0) {
_immutable_data = nm._immutable_data;
set_immutable_data_references_counter(get_immutable_data_references_counter() + 1);
} else {
_immutable_data = blob_end();
}
_exception_cache = nullptr;
_gc_data = nullptr;
_oops_do_mark_nmethods = nullptr;
_oops_do_mark_link = nullptr;
_compiled_ic_data = nullptr;
if (nm._osr_entry_point != nullptr) {
_osr_entry_point = (nm._osr_entry_point - (address) &nm) + (address) this;
} else {
_osr_entry_point = nullptr;
}
_entry_offset = nm._entry_offset;
_verified_entry_offset = nm._verified_entry_offset;
_entry_bci = nm._entry_bci;
_skipped_instructions_size = nm._skipped_instructions_size;
_stub_offset = nm._stub_offset;
_exception_offset = nm._exception_offset;
_deopt_handler_offset = nm._deopt_handler_offset;
_unwind_handler_offset = nm._unwind_handler_offset;
_num_stack_arg_slots = nm._num_stack_arg_slots;
_oops_size = nm._oops_size;
#if INCLUDE_JVMCI
_metadata_size = nm._metadata_size;
#endif
_nul_chk_table_offset = nm._nul_chk_table_offset;
_handler_table_offset = nm._handler_table_offset;
_scopes_pcs_offset = nm._scopes_pcs_offset;
_scopes_data_offset = nm._scopes_data_offset;
#if INCLUDE_JVMCI
_speculations_offset = nm._speculations_offset;
#endif
_orig_pc_offset = nm._orig_pc_offset;
_compile_id = nm._compile_id;
_comp_level = nm._comp_level;
_compiler_type = nm._compiler_type;
_is_unloading_state = nm._is_unloading_state;
_state = not_installed;
_has_unsafe_access = nm._has_unsafe_access;
_has_wide_vectors = nm._has_wide_vectors;
_has_monitors = nm._has_monitors;
_has_scoped_access = nm._has_scoped_access;
_has_flushed_dependencies = nm._has_flushed_dependencies;
_is_unlinked = nm._is_unlinked;
_load_reported = nm._load_reported;
_deoptimization_status = nm._deoptimization_status;
if (nm._pc_desc_container != nullptr) {
_pc_desc_container = new PcDescContainer(scopes_pcs_begin());
} else {
_pc_desc_container = nullptr;
}
// Copy nmethod contents excluding header
// - Constant part (doubles, longs and floats used in nmethod)
// - Code part:
// - Code body
// - Exception handler
// - Stub code
// - OOP table
memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());
post_init();
}
// Copy this nmethod into the code heap identified by code_blob_type and,
// if the original is still valid, switch execution over to the copy.
// Returns the relocated nmethod, or nullptr when allocation fails, the
// nmethod is not relocatable, or it is invalidated while the copy is
// being prepared. On success the original is made not_used.
nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
  assert(NMethodRelocation, "must enable use of function");
  // Locks required to be held by caller to ensure the nmethod
  // is not modified or purged from code cache during relocation
  assert_lock_strong(CodeCache_lock);
  assert_lock_strong(Compile_lock);
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  if (!is_relocatable()) {
    return nullptr;
  }
  // NOTE(review): presumably brings the source nmethod to a consistent
  // GC state before its contents are copied — confirm against the
  // nmethod entry-barrier contract.
  run_nmethod_entry_barrier();
  // Allocate in the target code heap and copy via the nmethod copy
  // constructor; placement-new returns nullptr when the heap is full.
  nmethod* nm_copy = new (size(), code_blob_type) nmethod(*this);
  if (nm_copy == nullptr) {
    return nullptr;
  }
  // Fix relocation
  RelocIterator iter(nm_copy);
  CodeBuffer src(this);
  CodeBuffer dst(nm_copy);
  while (iter.next()) {
#ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
    // Direct calls may no longer be in range and the use of a trampoline may now be required.
    // Instead, allow trampoline relocations to update their owners and perform the necessary checks.
    if (iter.reloc()->is_call()) {
      address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), nm_copy);
      if (trampoline != nullptr) {
        continue;
      }
    }
#endif
    iter.reloc()->fix_relocation_after_move(&src, &dst);
  }
  // To make dependency checking during class loading fast, record
  // the nmethod dependencies in the classes it is dependent on.
  // This allows the dependency checking code to simply walk the
  // class hierarchy above the loaded class, checking only nmethods
  // which are dependent on those classes. The slow way is to
  // check every nmethod for dependencies which makes it linear in
  // the number of methods compiled. For applications with a lot
  // classes the slow way is too slow.
  for (Dependencies::DepStream deps(nm_copy); deps.next(); ) {
    if (deps.type() == Dependencies::call_site_target_value) {
      // CallSite dependencies are managed on per-CallSite instance basis.
      oop call_site = deps.argument_oop(0);
      MethodHandles::add_dependent_nmethod(call_site, nm_copy);
    } else {
      InstanceKlass* ik = deps.context_type();
      if (ik == nullptr) {
        continue; // ignore things like evol_method
      }
      // record this nmethod as dependent on this klass
      ik->add_dependent_nmethod(nm_copy);
    }
  }
  // NMethodState_lock guards the state transitions performed below.
  MutexLocker ml_NMethodState_lock(NMethodState_lock, Mutex::_no_safepoint_check_flag);
  // Verify the nm we copied from is still valid
  if (!is_marked_for_deoptimization() && is_in_use()) {
    assert(method() != nullptr && method()->code() == this, "should be if is in use");
    nm_copy->clear_inline_caches();
    // Attempt to start using the copy
    if (nm_copy->make_in_use()) {
      // Flush the instruction cache over the copied code, then point the
      // Method at the copy so new invocations dispatch to it.
      ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
      methodHandle mh(Thread::current(), nm_copy->method());
      nm_copy->method()->set_code(mh, nm_copy);
      make_not_used();
      nm_copy->post_compiled_method_load_event();
      nm_copy->log_relocated_nmethod(this);
      return nm_copy;
    }
  }
  // The original was invalidated concurrently or the copy could not be
  // made in_use; discard the copy and report failure.
  nm_copy->make_not_used();
  return nullptr;
}
// A relocation candidate must be a regular (non-native, non-OSR) Java
// compilation that is currently in use and not on its way out: not marked
// for deoptimization, not unloading, free of evol metadata, and (under
// JVMCI) without an installed-code mirror. The checks below short-circuit
// in the same order as the original guard chain.
bool nmethod::is_relocatable() {
  bool candidate = is_java_method()
                && is_in_use()
                && !is_osr_method()
                && !is_marked_for_deoptimization();
#if INCLUDE_JVMCI
  candidate = candidate
           && !(jvmci_nmethod_data() != nullptr && jvmci_nmethod_data()->has_mirror());
#endif
  candidate = candidate
           && !is_unloading()
           && !has_evol_metadata();
  return candidate;
}
// Placement allocator: reserve nmethod_size bytes in the code heap that
// CodeCache::get_code_blob_type maps to the given compilation level.
// The C++ object size 'size' is unused; the full blob size is passed in.
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
// Placement allocator: reserve nmethod_size bytes directly in the code
// heap identified by code_blob_type (used e.g. by relocate()). The C++
// object size 'size' is unused.
void* nmethod::operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw () {
  return CodeCache::allocate(nmethod_size, code_blob_type);
}
void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
// Try MethodNonProfiled and MethodProfiled.
void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
@ -1458,11 +1705,6 @@ nmethod::nmethod(
} else {
_deopt_handler_offset = -1;
}
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_offset = -1;
}
} else
#endif
{
@ -1472,11 +1714,6 @@ nmethod::nmethod(
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_offset = -1;
}
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
// C1 generates UnwindHandler at the end of instructions section.
@ -1514,9 +1751,9 @@ nmethod::nmethod(
#if INCLUDE_JVMCI
_speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); )
DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
#else
DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); )
DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
#endif
assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
immutable_data_end_offset, immutable_data_size);
@ -1549,6 +1786,7 @@ nmethod::nmethod(
memcpy(speculations_begin(), speculations, speculations_len);
}
#endif
set_immutable_data_references_counter(1);
post_init();
@ -1615,6 +1853,40 @@ void nmethod::log_new_nmethod() const {
}
}
// Emit a LogCompilation XML element describing a successful relocation:
// identity, entry point and size of the copy, the original and new
// addresses and code-heap names, and the section offsets. Only active
// when -XX:+LogCompilation is on and the XML stream exists.
void nmethod::log_relocated_nmethod(nmethod* original) const {
  if (LogCompilation && xtty != nullptr) {
    ttyLocker ttyl;
    // The element name must be a single XML Name token. The previous
    // "relocated nmethod" emitted "<relocated nmethod ...>" — an element
    // named "relocated" followed by a stray valueless token, which is
    // malformed XML and breaks LogCompilation consumers.
    xtty->begin_elem("relocated_nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
    // Where the nmethod came from ...
    const char* original_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(original));
    xtty->print(" original_address='" INTPTR_FORMAT "'", p2i(original));
    xtty->print(" original_code_heap='%s'", original_code_heap_name);
    // ... and where it now lives.
    const char* new_code_heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(this));
    xtty->print(" new_address='" INTPTR_FORMAT "'", p2i(this));
    xtty->print(" new_code_heap='%s'", new_code_heap_name);
    // Section offsets of the relocated copy (same set as log_new_nmethod).
    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);
    LOG_OFFSET(xtty, metadata);
    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}
#undef LOG_OFFSET
@ -2147,9 +2419,18 @@ void nmethod::purge(bool unregister_nmethod) {
delete[] _compiled_ic_data;
if (_immutable_data != blob_end()) {
os::free(_immutable_data);
int reference_count = get_immutable_data_references_counter();
assert(reference_count > 0, "immutable data has no references");
set_immutable_data_references_counter(reference_count - 1);
// Free memory if this is the last nmethod referencing immutable data
if (reference_count == 0) {
os::free(_immutable_data);
}
_immutable_data = blob_end(); // Valid not null address
}
if (unregister_nmethod) {
Universe::heap()->unregister_nmethod(this);
}
@ -2693,15 +2974,6 @@ void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
"must end with a sentinel");
#endif //ASSERT
// Search for MethodHandle invokes and tag the nmethod.
for (int i = 0; i < count; i++) {
if (pcs[i].is_method_handle_invoke()) {
set_has_method_handle_invokes(true);
break;
}
}
assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler");
int size = count * sizeof(PcDesc);
assert(scopes_pcs_size() >= size, "oob");
memcpy(scopes_pcs_begin(), pcs, size);
@ -3711,7 +3983,6 @@ const char* nmethod::nmethod_section_label(address pos) const {
if (pos == code_begin()) label = "[Instructions begin]";
if (pos == entry_point()) label = "[Entry Point]";
if (pos == verified_entry_point()) label = "[Verified Entry Point]";
if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]";
if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
// Check stub_code before checking exception_handler or deopt_handler.
if (pos == this->stub_begin()) label = "[Stub Code]";

View File

@ -90,7 +90,6 @@ class ExceptionCache : public CHeapObj<mtCode> {
// cache pc descs found in earlier inquiries
class PcDescCache {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
@ -155,6 +154,7 @@ public:
// - Scopes data array
// - Scopes pcs array
// - JVMCI speculations array
// - Nmethod reference counter
#if INCLUDE_JVMCI
class FailedSpeculation;
@ -168,6 +168,8 @@ class nmethod : public CodeBlob {
friend class JVMCINMethodData;
friend class DeoptimizationScope;
#define ImmutableDataReferencesCounterSize ((int)sizeof(int))
private:
// Used to track in which deoptimize handshake this method will be deoptimized.
@ -227,9 +229,6 @@ class nmethod : public CodeBlob {
// All deoptee's will resume execution at this location described by
// this offset.
int _deopt_handler_offset;
// All deoptee's at a MethodHandle call site will resume execution
// at this location described by this offset.
int _deopt_mh_handler_offset;
// Offset (from insts_end) of the unwind handler if it exists
int16_t _unwind_handler_offset;
// Number of arguments passed on the stack
@ -268,7 +267,6 @@ class nmethod : public CodeBlob {
// set during construction
uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
_has_method_handle_invokes:1,// Has this method MethodHandle invokes?
_has_wide_vectors:1, // Preserve wide vectors at safepoints
_has_monitors:1, // Fastpath monitor detection for continuations
_has_scoped_access:1, // used by for shared scope closure (scopedMemoryAccess.cpp)
@ -335,8 +333,11 @@ class nmethod : public CodeBlob {
#endif
);
nmethod(const nmethod &nm);
// helper methods
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
void* operator new(size_t size, int nmethod_size, CodeBlobType code_blob_type) throw();
// For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
// Attention: Only allow NonNMethod space for special nmethods which don't need to be
@ -569,6 +570,12 @@ public:
#endif
);
// Relocate the nmethod to the code heap identified by code_blob_type.
// Returns nullptr if the code heap does not have enough space, the
// nmethod is unrelocatable, or the nmethod is invalidated during relocation,
// otherwise the relocated nmethod. The original nmethod will be marked not entrant.
nmethod* relocate(CodeBlobType code_blob_type);
static nmethod* new_native_nmethod(const methodHandle& method,
int compile_id,
CodeBuffer *code_buffer,
@ -585,6 +592,8 @@ public:
bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
bool is_relocatable();
// Compiler task identification. Note that all OSR methods
// are numbered in an independent sequence if CICountOSR is true,
// and native method wrappers are also numbered independently if
@ -607,7 +616,6 @@ public:
address stub_end () const { return code_end() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) data_begin(); }
oop* oops_end () const { return (oop*) data_end(); }
@ -638,11 +646,13 @@ public:
#if INCLUDE_JVMCI
address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
address speculations_begin () const { return _immutable_data + _speculations_offset ; }
address speculations_end () const { return immutable_data_end(); }
address speculations_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#else
address scopes_data_end () const { return immutable_data_end(); }
address scopes_data_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#endif
address immutable_data_references_counter_begin () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
// Sizes
int immutable_data_size() const { return _immutable_data_size; }
int consts_size () const { return int( consts_end () - consts_begin ()); }
@ -746,9 +756,6 @@ public:
bool has_scoped_access() const { return _has_scoped_access; }
void set_has_scoped_access(bool z) { _has_scoped_access = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
@ -819,12 +826,9 @@ public:
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// MethodHandle
bool is_method_handle_return(address return_pc);
// Deopt
// Return true is the PC is one would expect if the frame is being deopted.
inline bool is_deopt_pc(address pc);
inline bool is_deopt_mh_entry(address pc);
inline bool is_deopt_entry(address pc);
// Accessor/mutator for the original pc of a frame before a frame was deopted.
@ -958,6 +962,9 @@ public:
bool load_reported() const { return _load_reported; }
void set_load_reported() { _load_reported = true; }
inline int get_immutable_data_references_counter() { return *((int*)immutable_data_references_counter_begin()); }
inline void set_immutable_data_references_counter(int count) { *((int*)immutable_data_references_counter_begin()) = count; }
public:
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
@ -1026,6 +1033,7 @@ public:
// Logging
void log_identity(xmlStream* log) const;
void log_new_nmethod() const;
void log_relocated_nmethod(nmethod* original) const;
void log_state_change(InvalidationReason invalidation_reason) const;
// Prints block-level comments, including nmethod specific block labels:

View File

@ -31,16 +31,12 @@
#include "runtime/atomicAccess.hpp"
#include "runtime/frame.hpp"
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc); }
// True iff pc is exactly the entry of this nmethod's deoptimization handler.
inline bool nmethod::is_deopt_entry(address pc) {
  return pc == deopt_handler_begin();
}
// True iff pc is exactly the entry of the MethodHandle-call-site
// deoptimization handler of this nmethod.
inline bool nmethod::is_deopt_mh_entry(address pc) {
  return pc == deopt_mh_handler_begin();
}
// class ExceptionCache methods
// Read the number of valid cache entries with acquire ordering.
// NOTE(review): presumably pairs with a release store by the writer so
// entries are visible before the count — confirm against the writer side.
inline int ExceptionCache::count() { return AtomicAccess::load_acquire(&_count); }

Some files were not shown because too many files have changed in this diff Show More