Merge branch 'master' into 8044609-ssl

This commit is contained in:
Seán Coffey 2025-08-11 15:14:23 +00:00
commit 3116f43ea8
756 changed files with 19702 additions and 9771 deletions

View File

@ -64,26 +64,26 @@ jobs:
gnu-arch: aarch64
debian-arch: arm64
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: bookworm
tolerate-sysroot-errors: false
- target-cpu: arm
gnu-arch: arm
debian-arch: armhf
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: bookworm
tolerate-sysroot-errors: false
gnu-abi: eabihf
- target-cpu: s390x
gnu-arch: s390x
debian-arch: s390x
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: bookworm
tolerate-sysroot-errors: false
- target-cpu: ppc64le
gnu-arch: powerpc64le
debian-arch: ppc64el
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: bookworm
tolerate-sysroot-errors: false
- target-cpu: riscv64
gnu-arch: riscv64

View File

@ -1491,12 +1491,12 @@ following targets are known to work:</p>
</tr>
</tbody>
</table>
<p><code>BASE_OS</code> must be one of "OEL6" for Oracle Enterprise
Linux 6 or "Fedora" (if not specified "OEL6" will be the default). If
the base OS is "Fedora" the corresponding Fedora release can be
specified with the help of the <code>BASE_OS_VERSION</code> option (with
"27" as default version). If the build is successful, the new devkits
can be found in the <code>build/devkit/result</code> subdirectory:</p>
<p><code>BASE_OS</code> must be one of <code>OL</code> for Oracle
Enterprise Linux or <code>Fedora</code>. If the base OS is
<code>Fedora</code> the corresponding Fedora release can be specified
with the help of the <code>BASE_OS_VERSION</code> option. If the build
is successful, the new devkits can be found in the
<code>build/devkit/result</code> subdirectory:</p>
<pre><code>cd make/devkit
make TARGETS=&quot;ppc64le-linux-gnu aarch64-linux-gnu&quot; BASE_OS=Fedora BASE_OS_VERSION=21
ls -1 ../../build/devkit/result/

View File

@ -1285,12 +1285,10 @@ at least the following targets are known to work:
| ppc64le-linux-gnu |
| s390x-linux-gnu |
`BASE_OS` must be one of "OEL6" for Oracle Enterprise Linux 6 or "Fedora" (if
not specified "OEL6" will be the default). If the base OS is "Fedora" the
corresponding Fedora release can be specified with the help of the
`BASE_OS_VERSION` option (with "27" as default version). If the build is
successful, the new devkits can be found in the `build/devkit/result`
subdirectory:
`BASE_OS` must be one of `OL` for Oracle Enterprise Linux or `Fedora`. If the
base OS is `Fedora` the corresponding Fedora release can be specified with the
help of the `BASE_OS_VERSION` option. If the build is successful, the new
devkits can be found in the `build/devkit/result` subdirectory:
```
cd make/devkit

View File

@ -85,7 +85,7 @@ CreateHkTargets = \
################################################################################
# Include module specific build settings
THIS_SNIPPET := modules/$(MODULE)/Java.gmk
THIS_SNIPPET := $(call GetModuleSnippetName, Java)
ifneq ($(wildcard $(THIS_SNIPPET)), )
include MakeSnippetStart.gmk

View File

@ -184,7 +184,7 @@ endif
################################################################################
# Include module specific build settings
THIS_SNIPPET := modules/$(MODULE)/Jmod.gmk
THIS_SNIPPET := $(call GetModuleSnippetName, Jmod)
ifneq ($(wildcard $(THIS_SNIPPET)), )
include MakeSnippetStart.gmk

View File

@ -236,8 +236,8 @@ define create_overview_file
#
ifneq ($$($1_GROUPS), )
$1_OVERVIEW_TEXT += \
<p>This document is divided into \
$$(subst 2,two,$$(subst 3,three,$$(words $$($1_GROUPS)))) sections:</p> \
<p>This document has \
$$(subst 2,two,$$(subst 3,three,$$(words $$($1_GROUPS)))) major sections:</p> \
<blockquote><dl> \
#
$1_OVERVIEW_TEXT += $$(foreach g, $$($1_GROUPS), \
@ -246,7 +246,10 @@ define create_overview_file
)
$1_OVERVIEW_TEXT += \
</dl></blockquote> \
#
<p><a href="../specs/index.html">Related documents</a> specify the Java \
programming language, the Java Virtual Machine, various protocols and file \
formats pertaining to the Java platform, and tools included in the JDK.</p> \
#
endif
$1_OVERVIEW_TEXT += \
</body></html> \

View File

@ -270,6 +270,7 @@ endif
# Since debug symbols are not included in the jmod files, they need to be copied
# in manually after generating the images.
# These variables are read by SetupCopyDebuginfo
ALL_JDK_MODULES := $(JDK_MODULES)
ALL_JRE_MODULES := $(sort $(JRE_MODULES), $(foreach m, $(JRE_MODULES), \
$(call FindTransitiveDepsForModule, $m)))

View File

@ -1407,7 +1407,7 @@ CLEAN_SUPPORT_DIRS += demos
CLEAN_SUPPORT_DIR_TARGETS := $(addprefix clean-, $(CLEAN_SUPPORT_DIRS))
CLEAN_TESTS += hotspot-jtreg-native jdk-jtreg-native lib
CLEAN_TEST_TARGETS += $(addprefix clean-test-, $(CLEAN_TESTS))
CLEAN_PHASES := gensrc java native include
CLEAN_PHASES += gensrc java native include
CLEAN_PHASE_TARGETS := $(addprefix clean-, $(CLEAN_PHASES))
CLEAN_MODULE_TARGETS := $(addprefix clean-, $(ALL_MODULES))
# Construct targets of the form clean-$module-$phase

View File

@ -149,7 +149,7 @@ endef
################################################################################
PHASE_MAKEDIRS := $(TOPDIR)/make
PHASE_MAKEDIRS += $(TOPDIR)/make
# Helper macro for DeclareRecipesForPhase
# Declare a recipe for calling the module and phase specific makefile.

View File

@ -34,18 +34,23 @@ include MakeFileStart.gmk
################################################################################
include CopyFiles.gmk
include Modules.gmk
MODULE_SRC := $(TOPDIR)/src/$(MODULE)
# Define the snippet for MakeSnippetStart/End
THIS_SNIPPET := modules/$(MODULE)/$(MAKEFILE_PREFIX).gmk
################################################################################
# Include module specific build settings
include MakeSnippetStart.gmk
THIS_SNIPPET := $(call GetModuleSnippetName, $(MAKEFILE_PREFIX))
# Include the file being wrapped.
include $(THIS_SNIPPET)
ifneq ($(wildcard $(THIS_SNIPPET)), )
include MakeSnippetStart.gmk
include MakeSnippetEnd.gmk
# Include the file being wrapped.
include $(THIS_SNIPPET)
include MakeSnippetEnd.gmk
endif
ifeq ($(MAKEFILE_PREFIX), Lib)
# We need to keep track of what libraries are generated/needed by this

View File

@ -736,8 +736,15 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
$1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -mminimal-toc"
elif test "x$FLAGS_CPU" = xppc64le; then
# Little endian machine uses ELFv2 ABI.
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
$1_CFLAGS_CPU="-mcpu=power8 -mtune=power10"
# Use Power8 for target cpu, this is the first CPU to support PPC64 LE with ELFv2 ABI.
# Use Power10 for tuning target, this is supported by gcc >= 10
POWER_TUNE_VERSION="-mtune=power10"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${POWER_TUNE_VERSION}],
IF_FALSE: [
POWER_TUNE_VERSION="-mtune=power8"
]
)
$1_CFLAGS_CPU="-mcpu=power8 ${POWER_TUNE_VERSION}"
$1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -DABI_ELFv2"
fi
elif test "x$FLAGS_CPU" = xs390x; then

View File

@ -178,6 +178,10 @@ define SetupJavaCompilationBody
$1_SAFE_NAME := $$(strip $$(subst /,_, $1))
ifeq ($$($1_LOG_ACTION), )
$1_LOG_ACTION := Compiling
endif
ifeq ($$($1_SMALL_JAVA), )
# If unspecified, default to true
$1_SMALL_JAVA := true
@ -472,7 +476,7 @@ define SetupJavaCompilationBody
# list of files.
$$($1_FILELIST): $$($1_SRCS) $$($1_VARDEPS_FILE)
$$(call MakeDir, $$(@D))
$$(call LogWarn, Compiling up to $$(words $$($1_SRCS)) files for $1)
$$(call LogWarn, $$($1_LOG_ACTION) up to $$(words $$($1_SRCS)) files for $1)
$$(eval $$(call ListPathsSafely, $1_SRCS, $$($1_FILELIST)))
# Create a $$($1_MODFILELIST) file with significant modified dependencies

View File

@ -33,7 +33,7 @@ include $(TOPDIR)/make/conf/module-loader-map.conf
# Append platform-specific and upgradeable modules
PLATFORM_MODULES += $(PLATFORM_MODULES_$(OPENJDK_TARGET_OS)) \
$(UPGRADEABLE_PLATFORM_MODULES)
$(UPGRADEABLE_PLATFORM_MODULES) $(CUSTOM_UPGRADEABLE_PLATFORM_MODULES)
################################################################################
# Setup module sets for docs
@ -216,7 +216,7 @@ endif
# Find dependencies ("requires") for a given module.
# Param 1: Module to find dependencies for.
FindDepsForModule = \
$(DEPS_$(strip $1))
$(filter-out $(IMPORT_MODULES), $(DEPS_$(strip $1)))
# Find dependencies ("requires") transitively in 3 levels for a given module.
# Param 1: Module to find dependencies for.
@ -254,7 +254,8 @@ FindTransitiveIndirectDepsForModules = \
# Upgradeable modules are those that are either defined as upgradeable or that
# require an upradeable module.
FindAllUpgradeableModules = \
$(sort $(filter-out $(MODULES_FILTER), $(UPGRADEABLE_PLATFORM_MODULES)))
$(sort $(filter-out $(MODULES_FILTER), \
$(UPGRADEABLE_PLATFORM_MODULES) $(CUSTOM_UPGRADEABLE_PLATFORM_MODULES)))
################################################################################
@ -316,6 +317,19 @@ define ReadImportMetaData
$$(eval $$(call ReadSingleImportMetaData, $$m)))
endef
################################################################################
# Get a full snippet path for the current module and a given base name.
#
# Param 1 - The base name of the snippet file to include
GetModuleSnippetName = \
$(if $(CUSTOM_MODULE_MAKE_ROOT), \
$(if $(wildcard $(CUSTOM_MODULE_MAKE_ROOT)/$(MODULE)/$(strip $1).gmk), \
$(CUSTOM_MODULE_MAKE_ROOT)/$(MODULE)/$(strip $1).gmk, \
$(wildcard modules/$(MODULE)/$(strip $1).gmk) \
), \
$(wildcard modules/$(MODULE)/$(strip $1).gmk) \
)
################################################################################
endif # include guard

View File

@ -177,7 +177,8 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
endif
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE \
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0 \
-DPNG_LOONGARCH_LSX_OPT=0
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0

View File

@ -121,15 +121,15 @@ ifeq ($(call isTargetOs, windows), true)
TARGETS += $(BUILD_LIBJPACKAGE)
##############################################################################
## Build libwixhelper
## Build libmsica
##############################################################################
# Build Wix custom action helper
# Build MSI custom action library
# Output library in resources dir, and symbols in the object dir
$(eval $(call SetupJdkLibrary, BUILD_LIBWIXHELPER, \
NAME := wixhelper, \
$(eval $(call SetupJdkLibrary, BUILD_LIBMSICA, \
NAME := msica, \
OUTPUT_DIR := $(JPACKAGE_OUTPUT_DIR), \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libwixhelper, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libmsica, \
ONLY_EXPORTED := true, \
OPTIMIZATION := LOW, \
EXTRA_SRC := common, \
@ -139,7 +139,7 @@ ifeq ($(call isTargetOs, windows), true)
LIBS_windows := msi.lib ole32.lib shell32.lib shlwapi.lib user32.lib, \
))
TARGETS += $(BUILD_LIBWIXHELPER)
TARGETS += $(BUILD_LIBMSICA)
##############################################################################
## Build msiwrapper

View File

@ -881,6 +881,46 @@ reg_class vectorx_reg(
V31, V31_H, V31_J, V31_K
);
// Class for vector register V10
reg_class v10_veca_reg(
V10, V10_H, V10_J, V10_K
);
// Class for vector register V11
reg_class v11_veca_reg(
V11, V11_H, V11_J, V11_K
);
// Class for vector register V12
reg_class v12_veca_reg(
V12, V12_H, V12_J, V12_K
);
// Class for vector register V13
reg_class v13_veca_reg(
V13, V13_H, V13_J, V13_K
);
// Class for vector register V17
reg_class v17_veca_reg(
V17, V17_H, V17_J, V17_K
);
// Class for vector register V18
reg_class v18_veca_reg(
V18, V18_H, V18_J, V18_K
);
// Class for vector register V23
reg_class v23_veca_reg(
V23, V23_H, V23_J, V23_K
);
// Class for vector register V24
reg_class v24_veca_reg(
V24, V24_H, V24_J, V24_K
);
// Class for 128 bit register v0
reg_class v0_reg(
V0, V0_H
@ -4969,6 +5009,86 @@ operand vReg()
interface(REG_INTER);
%}
operand vReg_V10()
%{
constraint(ALLOC_IN_RC(v10_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V11()
%{
constraint(ALLOC_IN_RC(v11_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V12()
%{
constraint(ALLOC_IN_RC(v12_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V13()
%{
constraint(ALLOC_IN_RC(v13_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V17()
%{
constraint(ALLOC_IN_RC(v17_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V18()
%{
constraint(ALLOC_IN_RC(v18_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V23()
%{
constraint(ALLOC_IN_RC(v23_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V24()
%{
constraint(ALLOC_IN_RC(v24_veca_reg));
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vecA()
%{
constraint(ALLOC_IN_RC(vectora_reg));

View File

@ -257,6 +257,28 @@ source %{
return false;
}
break;
case Op_SelectFromTwoVector:
// The "tbl" instruction for two vector table is supported only in Neon and SVE2. Return
// false if vector length > 16B but supported SVE version < 2.
// For vector length of 16B, generate SVE2 "tbl" instruction if SVE2 is supported, else
// generate Neon "tbl" instruction to select from two vectors.
// This operation is disabled for doubles and longs on machines with SVE < 2 and instead
// the default VectorRearrange + VectorBlend is generated because the performance of the default
// implementation was better than or equal to the implementation for SelectFromTwoVector.
if (UseSVE < 2 && (type2aelembytes(bt) == 8 || length_in_bytes > 16)) {
return false;
}
// Because the SVE2 "tbl" instruction is unpredicated and partial operations cannot be generated
// using masks, we disable this operation on machines where length_in_bytes < MaxVectorSize
// on that machine with the only exception of 8B vector length. This is because at the time of
// writing this, there is no SVE2 machine available with length_in_bytes > 8 and
// length_in_bytes < MaxVectorSize to test this operation on (for example - there isn't an
// SVE2 machine available with MaxVectorSize = 32 to test a case with length_in_bytes = 16).
if (UseSVE == 2 && length_in_bytes > 8 && length_in_bytes < MaxVectorSize) {
return false;
}
break;
default:
break;
}
@ -7172,3 +7194,71 @@ instruct vexpandBits(vReg dst, vReg src1, vReg src2) %{
%}
ins_pipe(pipe_slow);
%}
// ------------------------------------- SelectFromTwoVector ------------------------------------
// The Neon and SVE2 tbl instruction for two vector lookup requires both the source vectors to be
// consecutive. The match rules for SelectFromTwoVector reserve two consecutive vector registers
// for src1 and src2.
// Four combinations of vector registers for vselect_from_two_vectors are chosen at random
// (two from volatile and two from non-volatile set) which gives more freedom to the register
// allocator to choose the best pair of source registers at that point.
instruct vselect_from_two_vectors_10_11(vReg dst, vReg_V10 src1, vReg_V11 src2,
vReg index, vReg tmp) %{
effect(TEMP_DEF dst, TEMP tmp);
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
format %{ "vselect_from_two_vectors_10_11 $dst, $src1, $src2, $index\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
$src2$$FloatRegister, $index$$FloatRegister,
$tmp$$FloatRegister, bt, length_in_bytes);
%}
ins_pipe(pipe_slow);
%}
instruct vselect_from_two_vectors_12_13(vReg dst, vReg_V12 src1, vReg_V13 src2,
vReg index, vReg tmp) %{
effect(TEMP_DEF dst, TEMP tmp);
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
format %{ "vselect_from_two_vectors_12_13 $dst, $src1, $src2, $index\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
$src2$$FloatRegister, $index$$FloatRegister,
$tmp$$FloatRegister, bt, length_in_bytes);
%}
ins_pipe(pipe_slow);
%}
instruct vselect_from_two_vectors_17_18(vReg dst, vReg_V17 src1, vReg_V18 src2,
vReg index, vReg tmp) %{
effect(TEMP_DEF dst, TEMP tmp);
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
format %{ "vselect_from_two_vectors_17_18 $dst, $src1, $src2, $index\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
$src2$$FloatRegister, $index$$FloatRegister,
$tmp$$FloatRegister, bt, length_in_bytes);
%}
ins_pipe(pipe_slow);
%}
instruct vselect_from_two_vectors_23_24(vReg dst, vReg_V23 src1, vReg_V24 src2,
vReg index, vReg tmp) %{
effect(TEMP_DEF dst, TEMP tmp);
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
format %{ "vselect_from_two_vectors_23_24 $dst, $src1, $src2, $index\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
$src2$$FloatRegister, $index$$FloatRegister,
$tmp$$FloatRegister, bt, length_in_bytes);
%}
ins_pipe(pipe_slow);
%}

View File

@ -247,6 +247,28 @@ source %{
return false;
}
break;
case Op_SelectFromTwoVector:
// The "tbl" instruction for two vector table is supported only in Neon and SVE2. Return
// false if vector length > 16B but supported SVE version < 2.
// For vector length of 16B, generate SVE2 "tbl" instruction if SVE2 is supported, else
// generate Neon "tbl" instruction to select from two vectors.
// This operation is disabled for doubles and longs on machines with SVE < 2 and instead
// the default VectorRearrange + VectorBlend is generated because the performance of the default
// implementation was better than or equal to the implementation for SelectFromTwoVector.
if (UseSVE < 2 && (type2aelembytes(bt) == 8 || length_in_bytes > 16)) {
return false;
}
// Because the SVE2 "tbl" instruction is unpredicated and partial operations cannot be generated
// using masks, we disable this operation on machines where length_in_bytes < MaxVectorSize
// on that machine with the only exception of 8B vector length. This is because at the time of
// writing this, there is no SVE2 machine available with length_in_bytes > 8 and
// length_in_bytes < MaxVectorSize to test this operation on (for example - there isn't an
// SVE2 machine available with MaxVectorSize = 32 to test a case with length_in_bytes = 16).
if (UseSVE == 2 && length_in_bytes > 8 && length_in_bytes < MaxVectorSize) {
return false;
}
break;
default:
break;
}
@ -5154,3 +5176,34 @@ BITPERM(vcompressBits, CompressBitsV, sve_bext)
// ----------------------------------- ExpandBitsV ---------------------------------
BITPERM(vexpandBits, ExpandBitsV, sve_bdep)
// ------------------------------------- SelectFromTwoVector ------------------------------------
// The Neon and SVE2 tbl instruction for two vector lookup requires both the source vectors to be
// consecutive. The match rules for SelectFromTwoVector reserve two consecutive vector registers
// for src1 and src2.
// Four combinations of vector registers for vselect_from_two_vectors are chosen at random
// (two from volatile and two from non-volatile set) which gives more freedom to the register
// allocator to choose the best pair of source registers at that point.
dnl
dnl SELECT_FROM_TWO_VECTORS($1, $2 )
dnl SELECT_FROM_TWO_VECTORS(first_reg, second_reg)
define(`SELECT_FROM_TWO_VECTORS', `
instruct vselect_from_two_vectors_$1_$2(vReg dst, vReg_V$1 src1, vReg_V$2 src2,
vReg index, vReg tmp) %{
effect(TEMP_DEF dst, TEMP tmp);
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
format %{ "vselect_from_two_vectors_$1_$2 $dst, $src1, $src2, $index\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
$src2$$FloatRegister, $index$$FloatRegister,
$tmp$$FloatRegister, bt, length_in_bytes);
%}
ins_pipe(pipe_slow);
%}')dnl
dnl
SELECT_FROM_TWO_VECTORS(10, 11)
SELECT_FROM_TWO_VECTORS(12, 13)
SELECT_FROM_TWO_VECTORS(17, 18)
SELECT_FROM_TWO_VECTORS(23, 24)

View File

@ -4231,12 +4231,29 @@ public:
sf(imm1, 9, 5), rf(Zd, 0);
}
// SVE programmable table lookup/permute using vector of element indices
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
private:
void _sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, unsigned reg_count, FloatRegister Zm) {
starti;
assert(T != Q, "invalid size");
// Only supports one or two vector lookup. One vector lookup was introduced in SVE1
// and two vector lookup in SVE2
assert(0 < reg_count && reg_count <= 2, "invalid number of registers");
int op11 = (reg_count == 1) ? 0b10 : 0b01;
f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
f(0b001, 15, 13), f(op11, 12, 11), f(0b0, 10), rf(Zn, 5), rf(Zd, 0);
}
public:
// SVE/SVE2 Programmable table lookup in one or two vector table (zeroing)
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
_sve_tbl(Zd, T, Zn, 1, Zm);
}
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn1, FloatRegister Zn2, FloatRegister Zm) {
assert(Zn1->successor() == Zn2, "invalid order of registers");
_sve_tbl(Zd, T, Zn1, 2, Zm);
}
// Shuffle active elements of vector to the right and fill with zero

View File

@ -2858,3 +2858,124 @@ void C2_MacroAssembler::reconstruct_frame_pointer(Register rtmp) {
add(rfp, sp, framesize - 2 * wordSize);
}
}
// Selects elements from two source vectors (src1, src2) based on index values in the index register
// using Neon instructions and places it in the destination vector element corresponding to the
// index vector element. Each index in the index register must be in the range - [0, 2 * NUM_ELEM),
// where NUM_ELEM is the number of BasicType elements per vector.
// If idx < NUM_ELEM --> selects src1[idx] (idx is an element of the index register)
// Otherwise, selects src2[idx NUM_ELEM]
void C2_MacroAssembler::select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, unsigned vector_length_in_bytes) {
assert_different_registers(dst, src1, src2, tmp);
SIMD_Arrangement size = vector_length_in_bytes == 16 ? T16B : T8B;
if (vector_length_in_bytes == 16) {
assert(UseSVE <= 1, "sve must be <= 1");
assert(src1->successor() == src2, "Source registers must be ordered");
// If the vector length is 16B, then use the Neon "tbl" instruction with two vector table
tbl(dst, size, src1, 2, index);
} else { // vector length == 8
assert(UseSVE == 0, "must be Neon only");
// We need to fit both the source vectors (src1, src2) in a 128-bit register because the
// Neon "tbl" instruction supports only looking up 16B vectors. We then use the Neon "tbl"
// instruction with one vector lookup
ins(tmp, D, src1, 0, 0);
ins(tmp, D, src2, 1, 0);
tbl(dst, size, tmp, 1, index);
}
}
// Selects elements from two source vectors (src1, src2) based on index values in the index register
// using SVE/SVE2 instructions and places it in the destination vector element corresponding to the
// index vector element. Each index in the index register must be in the range - [0, 2 * NUM_ELEM),
// where NUM_ELEM is the number of BasicType elements per vector.
// If idx < NUM_ELEM --> selects src1[idx] (idx is an element of the index register)
// Otherwise, selects src2[idx NUM_ELEM]
void C2_MacroAssembler::select_from_two_vectors_sve(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, SIMD_RegVariant T,
unsigned vector_length_in_bytes) {
assert_different_registers(dst, src1, src2, index, tmp);
if (vector_length_in_bytes == 8) {
// We need to fit both the source vectors (src1, src2) in a single vector register because the
// SVE "tbl" instruction is unpredicated and works on the entire vector which can lead to
// incorrect results if each source vector is only partially filled. We then use the SVE "tbl"
// instruction with one vector lookup
assert(UseSVE >= 1, "sve must be >= 1");
ins(tmp, D, src1, 0, 0);
ins(tmp, D, src2, 1, 0);
sve_tbl(dst, T, tmp, index);
} else { // UseSVE == 2 and vector_length_in_bytes > 8
// If the vector length is > 8, then use the SVE2 "tbl" instruction with the two vector table.
// The assertion - vector_length_in_bytes == MaxVectorSize ensures that this operation
// is not executed on machines where vector_length_in_bytes < MaxVectorSize
// with the only exception of 8B vector length.
assert(UseSVE == 2 && vector_length_in_bytes == MaxVectorSize, "must be");
assert(src1->successor() == src2, "Source registers must be ordered");
sve_tbl(dst, T, src1, src2, index);
}
}
void C2_MacroAssembler::select_from_two_vectors(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, BasicType bt,
unsigned vector_length_in_bytes) {
assert_different_registers(dst, src1, src2, index, tmp);
// The cases that can reach this method are -
// - UseSVE = 0, vector_length_in_bytes = 8 or 16
// - UseSVE = 1, vector_length_in_bytes = 8 or 16
// - UseSVE = 2, vector_length_in_bytes >= 8
//
// SVE/SVE2 tbl instructions are generated when UseSVE = 1 with vector_length_in_bytes = 8
// and UseSVE = 2 with vector_length_in_bytes >= 8
//
// Neon instructions are generated when UseSVE = 0 with vector_length_in_bytes = 8 or 16 and
// UseSVE = 1 with vector_length_in_bytes = 16
if ((UseSVE == 1 && vector_length_in_bytes == 8) || UseSVE == 2) {
SIMD_RegVariant T = elemType_to_regVariant(bt);
select_from_two_vectors_sve(dst, src1, src2, index, tmp, T, vector_length_in_bytes);
return;
}
// The only BasicTypes that can reach here are T_SHORT, T_BYTE, T_INT and T_FLOAT
assert(bt != T_DOUBLE && bt != T_LONG, "unsupported basic type");
assert(vector_length_in_bytes <= 16, "length_in_bytes must be <= 16");
bool isQ = vector_length_in_bytes == 16;
SIMD_Arrangement size1 = isQ ? T16B : T8B;
SIMD_Arrangement size2 = esize2arrangement((uint)type2aelembytes(bt), isQ);
// Neon "tbl" instruction only supports byte tables, so we need to look at chunks of
// 2B for selecting shorts or chunks of 4B for selecting ints/floats from the table.
// The index values in "index" register are in the range of [0, 2 * NUM_ELEM) where NUM_ELEM
// is the number of elements that can fit in a vector. For ex. for T_SHORT with 64-bit vector length,
// the indices can range from [0, 8).
// As an example with 64-bit vector length and T_SHORT type - let index = [2, 5, 1, 0]
// Move a constant 0x02 in every byte of tmp - tmp = [0x0202, 0x0202, 0x0202, 0x0202]
// Multiply index vector with tmp to yield - dst = [0x0404, 0x0a0a, 0x0202, 0x0000]
// Move a constant 0x0100 in every 2B of tmp - tmp = [0x0100, 0x0100, 0x0100, 0x0100]
// Add the multiplied result to the vector in tmp to obtain the byte level
// offsets - dst = [0x0504, 0x0b0a, 0x0302, 0x0100]
// Use these offsets in the "tbl" instruction to select chunks of 2B.
if (bt == T_BYTE) {
select_from_two_vectors_neon(dst, src1, src2, index, tmp, vector_length_in_bytes);
} else {
int elem_size = (bt == T_SHORT) ? 2 : 4;
uint64_t tbl_offset = (bt == T_SHORT) ? 0x0100u : 0x03020100u;
mov(tmp, size1, elem_size);
mulv(dst, size2, index, tmp);
mov(tmp, size2, tbl_offset);
addv(dst, size1, dst, tmp); // "dst" now contains the processed index elements
// to select a set of 2B/4B
select_from_two_vectors_neon(dst, src1, src2, dst, tmp, vector_length_in_bytes);
}
}

View File

@ -34,6 +34,15 @@
void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
void select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, unsigned vector_length_in_bytes);
void select_from_two_vectors_sve(FloatRegister dst, FloatRegister src1,
FloatRegister src2, FloatRegister index,
FloatRegister tmp, SIMD_RegVariant T,
unsigned vector_length_in_bytes);
public:
// jdk.internal.util.ArraysSupport.vectorizedHashCode
address arrays_hashcode(Register ary, Register cnt, Register result, FloatRegister vdata0,
@ -193,4 +202,9 @@
void reconstruct_frame_pointer(Register rtmp);
// Select from a table of two vectors
void select_from_two_vectors(FloatRegister dst, FloatRegister src1, FloatRegister src2,
FloatRegister index, FloatRegister tmp, BasicType bt,
unsigned vector_length_in_bytes);
#endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP

View File

@ -292,7 +292,8 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
} else {
assert(is_phantom, "only remaining strength");
assert(!is_narrow, "phantom access cannot be narrow");
__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
// AOT saved adapters need relocation for this call.
__ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
}
__ blr(lr);
__ mov(rscratch1, r0);

View File

@ -2732,8 +2732,11 @@ class StubGenerator: public StubCodeGenerator {
address entry_jlong_arraycopy;
address entry_checkcast_arraycopy;
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
// generate the common exit first so later stubs can rely on it if
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
generate_copy_longs(StubId::stubgen_copy_byte_f_id, IN_HEAP | IS_ARRAY, copy_f, r0, r1, r15);
generate_copy_longs(StubId::stubgen_copy_byte_b_id, IN_HEAP | IS_ARRAY, copy_b, r0, r1, r15);
@ -11680,8 +11683,6 @@ class StubGenerator: public StubCodeGenerator {
}
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}

View File

@ -71,6 +71,10 @@ ATTRIBUTE_ALIGNED(64) uint32_t StubRoutines::aarch64::_dilithiumConsts[] =
/**
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
*/
address StubRoutines::crc_table_addr() { return (address)StubRoutines::aarch64::_crc_table; }
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }
ATTRIBUTE_ALIGNED(4096) juint StubRoutines::aarch64::_crc_table[] =
{
// Table 0

View File

@ -47,6 +47,7 @@ enum platform_dependent_constants {
class aarch64 {
friend class StubGenerator;
friend class StubRoutines;
#if INCLUDE_JVMCI
friend class JVMCIVMStructs;
#endif

View File

@ -32,6 +32,7 @@
#include "runtime/vm_version.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
int VM_Version::_cpu;
int VM_Version::_model;
@ -50,6 +51,8 @@ uintptr_t VM_Version::_pac_mask;
SpinWait VM_Version::_spin_wait;
const char* VM_Version::_features_names[MAX_CPU_FEATURES] = { nullptr };
static SpinWait get_spin_wait_desc() {
SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount);
if (spin_wait.inst() == SpinWait::SB && !VM_Version::supports_sb()) {
@ -60,6 +63,11 @@ static SpinWait get_spin_wait_desc() {
}
void VM_Version::initialize() {
#define SET_CPU_FEATURE_NAME(id, name, bit) \
_features_names[bit] = XSTR(name);
CPU_FEATURE_FLAGS(SET_CPU_FEATURE_NAME)
#undef SET_CPU_FEATURE_NAME
_supports_atomic_getset4 = true;
_supports_atomic_getadd4 = true;
_supports_atomic_getset8 = true;
@ -194,7 +202,7 @@ void VM_Version::initialize() {
// Cortex A53
if (_cpu == CPU_ARM && model_is(0xd03)) {
_features |= CPU_A53MAC;
set_feature(CPU_A53MAC);
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
}
@ -234,7 +242,7 @@ void VM_Version::initialize() {
}
}
if (_features & (CPU_FP | CPU_ASIMD)) {
if (supports_feature(CPU_FP) || supports_feature(CPU_ASIMD)) {
if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
}
@ -397,7 +405,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
if (_features & CPU_ASIMD) {
if (supports_feature(CPU_ASIMD)) {
if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
UseChaCha20Intrinsics = true;
}
@ -408,7 +416,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
}
if (_features & CPU_ASIMD) {
if (supports_feature(CPU_ASIMD)) {
if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) {
UseKyberIntrinsics = true;
}
@ -419,7 +427,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseKyberIntrinsics, false);
}
if (_features & CPU_ASIMD) {
if (supports_feature(CPU_ASIMD)) {
if (FLAG_IS_DEFAULT(UseDilithiumIntrinsics)) {
UseDilithiumIntrinsics = true;
}
@ -620,32 +628,38 @@ void VM_Version::initialize() {
// Sync SVE related CPU features with flags
if (UseSVE < 2) {
_features &= ~CPU_SVE2;
_features &= ~CPU_SVEBITPERM;
clear_feature(CPU_SVE2);
clear_feature(CPU_SVEBITPERM);
}
if (UseSVE < 1) {
_features &= ~CPU_SVE;
clear_feature(CPU_SVE);
}
// Construct the "features" string
char buf[512];
int buf_used_len = os::snprintf_checked(buf, sizeof(buf), "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
stringStream ss(512);
ss.print("0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
if (_model2) {
os::snprintf_checked(buf + buf_used_len, sizeof(buf) - buf_used_len, "(0x%03x)", _model2);
ss.print("(0x%03x)", _model2);
}
size_t features_offset = strnlen(buf, sizeof(buf));
#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) \
do { \
if (VM_Version::supports_##name()) strcat(buf, ", " #name); \
} while(0);
CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
#undef ADD_FEATURE_IF_SUPPORTED
ss.print(", ");
int features_offset = (int)ss.size();
insert_features_names(_features, ss);
_cpu_info_string = os::strdup(buf);
_cpu_info_string = ss.as_string(true);
_features_string = _cpu_info_string + features_offset;
}
_features_string = extract_features_string(_cpu_info_string,
strnlen(_cpu_info_string, sizeof(buf)),
features_offset);
void VM_Version::insert_features_names(uint64_t features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
}
i += 1;
}
return (const char*)nullptr;
}, ", ");
}
#if defined(LINUX)

View File

@ -30,6 +30,10 @@
#include "runtime/abstract_vm_version.hpp"
#include "utilities/sizes.hpp"
class stringStream;
#define BIT_MASK(flag) (1ULL<<(flag))
class VM_Version : public Abstract_VM_Version {
friend class VMStructs;
friend class JVMCIVMStructs;
@ -66,6 +70,8 @@ public:
static void initialize();
static void check_virtualizations();
static void insert_features_names(uint64_t features, stringStream& ss);
static void print_platform_virtualization_info(outputStream*);
// Asserts
@ -139,17 +145,32 @@ enum Ampere_CPU_Model {
decl(A53MAC, a53mac, 31)
enum Feature_Flag {
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit),
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = bit,
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
#undef DECLARE_CPU_FEATURE_FLAG
MAX_CPU_FEATURES
};
STATIC_ASSERT(sizeof(_features) * BitsPerByte >= MAX_CPU_FEATURES);
static const char* _features_names[MAX_CPU_FEATURES];
// Feature identification
#define CPU_FEATURE_DETECTION(id, name, bit) \
static bool supports_##name() { return (_features & CPU_##id) != 0; };
static bool supports_##name() { return supports_feature(CPU_##id); }
CPU_FEATURE_FLAGS(CPU_FEATURE_DETECTION)
#undef CPU_FEATURE_DETECTION
static void set_feature(Feature_Flag flag) {
_features |= BIT_MASK(flag);
}
static void clear_feature(Feature_Flag flag) {
_features &= (~BIT_MASK(flag));
}
static bool supports_feature(Feature_Flag flag) {
return (_features & BIT_MASK(flag)) != 0;
}
static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; }
static int cpu_model2() { return _model2; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,8 +95,6 @@
}
static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free Rheap_base) in case of compressed oops
if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
return range;
}

View File

@ -2229,16 +2229,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedClassPointers) {
// We don't need decode because we just need to compare
__ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
__ cmp_32(tmp, tmp2);
} else {
__ load_klass(tmp, src);
__ load_klass(tmp2, dst);
__ cmp(tmp, tmp2);
}
__ load_klass(tmp, src);
__ load_klass(tmp2, dst);
__ cmp(tmp, tmp2);
__ b(*stub->entry(), ne);
} else {
// For object arrays, if src is a sub class of dst then we can
@ -2461,12 +2454,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
if (info != nullptr) {
add_debug_info_for_null_check_here(info);
}
if (UseCompressedClassPointers) { // On 32 bit arm??
__ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
} else {
__ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

View File

@ -3001,12 +3001,15 @@ class StubGenerator: public StubCodeGenerator {
void generate_arraycopy_stubs() {
// generate the common exit first so later stubs can rely on it if
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
// these need always status in case they are called from generic_arraycopy
StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);

View File

@ -36,3 +36,6 @@ STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY
address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; }
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }

View File

@ -174,6 +174,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
break;
case Interpreter::java_lang_math_fmaD:
case Interpreter::java_lang_math_fmaF:
case Interpreter::java_lang_math_sinh:
case Interpreter::java_lang_math_tanh:
case Interpreter::java_lang_math_cbrt:
// TODO: Implement intrinsic

View File

@ -3271,12 +3271,15 @@ class StubGenerator: public StubCodeGenerator {
}
void generate_arraycopy_stubs() {
// generate the common exit first so later stubs can rely on it if
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
// non-aligned disjoint versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);
@ -4982,13 +4985,11 @@ void generate_lookup_secondary_supers_table_stub() {
// CRC32 Intrinsics.
if (UseCRC32Intrinsics) {
StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32_id);
}
// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY);
StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32C_id);
}

View File

@ -54,6 +54,7 @@ enum platform_dependent_constants {
class ppc {
friend class StubGenerator;
friend class StubRoutines;
private:
public:

View File

@ -74,6 +74,22 @@ static julong compute_inverse_poly(julong long_poly) {
return div;
}
static address _crc_table_addr = nullptr;
static address _crc32c_table_addr = nullptr;
address StubRoutines::crc_table_addr() {
if (_crc_table_addr == nullptr) {
_crc_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
}
return _crc_table_addr;
}
address StubRoutines::crc32c_table_addr() {
if (_crc32c_table_addr == nullptr) {
_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY);
}
return _crc32c_table_addr;
}
// Constants to fold n words as needed by macroAssembler.
address StubRoutines::ppc::generate_crc_constants(juint reverse_poly) {
// Layout of constant table:

View File

@ -1089,6 +1089,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_sin : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); break;
case Interpreter::java_lang_math_cos : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); break;
case Interpreter::java_lang_math_tan : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); break;
case Interpreter::java_lang_math_sinh : /* run interpreted */ break;
case Interpreter::java_lang_math_tanh : /* run interpreted */ break;
case Interpreter::java_lang_math_cbrt : /* run interpreted */ break;
case Interpreter::java_lang_math_abs : /* run interpreted */ break;

View File

@ -1952,16 +1952,15 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
mv(pow31_3, 29791); // [31^^3]
mv(pow31_2, 961); // [31^^2]
slli(chunks_end, chunks, chunks_end_shift);
add(chunks_end, ary, chunks_end);
shadd(chunks_end, chunks, ary, t0, chunks_end_shift);
andi(cnt, cnt, stride - 1); // don't forget about tail!
bind(WIDE_LOOP);
mulw(result, result, pow31_4); // 31^^4 * h
arrays_hashcode_elload(t0, Address(ary, 0 * elsize), eltype);
arrays_hashcode_elload(t1, Address(ary, 1 * elsize), eltype);
arrays_hashcode_elload(tmp5, Address(ary, 2 * elsize), eltype);
arrays_hashcode_elload(tmp6, Address(ary, 3 * elsize), eltype);
mulw(result, result, pow31_4); // 31^^4 * h
mulw(t0, t0, pow31_3); // 31^^3 * ary[i+0]
addw(result, result, t0);
mulw(t1, t1, pow31_2); // 31^^2 * ary[i+1]
@ -1976,8 +1975,7 @@ void C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register res
beqz(cnt, DONE);
bind(TAIL);
slli(chunks_end, cnt, chunks_end_shift);
add(chunks_end, ary, chunks_end);
shadd(chunks_end, cnt, ary, t0, chunks_end_shift);
bind(TAIL_LOOP);
arrays_hashcode_elload(t0, Address(ary), eltype);

View File

@ -97,52 +97,52 @@ bool MacroAssembler::is_pc_relative_at(address instr) {
// auipc + load
// auipc + fload_load
return (is_auipc_at(instr)) &&
(is_addi_at(instr + instruction_size) ||
is_jalr_at(instr + instruction_size) ||
is_load_at(instr + instruction_size) ||
is_float_load_at(instr + instruction_size)) &&
(is_addi_at(instr + MacroAssembler::instruction_size) ||
is_jalr_at(instr + MacroAssembler::instruction_size) ||
is_load_at(instr + MacroAssembler::instruction_size) ||
is_float_load_at(instr + MacroAssembler::instruction_size)) &&
check_pc_relative_data_dependency(instr);
}
// ie:ld(Rd, Label)
bool MacroAssembler::is_load_pc_relative_at(address instr) {
return is_auipc_at(instr) && // auipc
is_ld_at(instr + instruction_size) && // ld
is_ld_at(instr + MacroAssembler::instruction_size) && // ld
check_load_pc_relative_data_dependency(instr);
}
bool MacroAssembler::is_movptr1_at(address instr) {
return is_lui_at(instr) && // Lui
is_addi_at(instr + instruction_size) && // Addi
is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
is_addi_at(instr + instruction_size * 3) && // Addi
is_slli_shift_at(instr + instruction_size * 4, 6) && // Slli Rd, Rs, 6
(is_addi_at(instr + instruction_size * 5) ||
is_jalr_at(instr + instruction_size * 5) ||
is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load
is_addi_at(instr + MacroAssembler::instruction_size) && // Addi
is_slli_shift_at(instr + MacroAssembler::instruction_size * 2, 11) && // Slli Rd, Rs, 11
is_addi_at(instr + MacroAssembler::instruction_size * 3) && // Addi
is_slli_shift_at(instr + MacroAssembler::instruction_size * 4, 6) && // Slli Rd, Rs, 6
(is_addi_at(instr + MacroAssembler::instruction_size * 5) ||
is_jalr_at(instr + MacroAssembler::instruction_size * 5) ||
is_load_at(instr + MacroAssembler::instruction_size * 5)) && // Addi/Jalr/Load
check_movptr1_data_dependency(instr);
}
bool MacroAssembler::is_movptr2_at(address instr) {
return is_lui_at(instr) && // lui
is_lui_at(instr + instruction_size) && // lui
is_slli_shift_at(instr + instruction_size * 2, 18) && // slli Rd, Rs, 18
is_add_at(instr + instruction_size * 3) &&
(is_addi_at(instr + instruction_size * 4) ||
is_jalr_at(instr + instruction_size * 4) ||
is_load_at(instr + instruction_size * 4)) && // Addi/Jalr/Load
is_lui_at(instr + MacroAssembler::instruction_size) && // lui
is_slli_shift_at(instr + MacroAssembler::instruction_size * 2, 18) && // slli Rd, Rs, 18
is_add_at(instr + MacroAssembler::instruction_size * 3) &&
(is_addi_at(instr + MacroAssembler::instruction_size * 4) ||
is_jalr_at(instr + MacroAssembler::instruction_size * 4) ||
is_load_at(instr + MacroAssembler::instruction_size * 4)) && // Addi/Jalr/Load
check_movptr2_data_dependency(instr);
}
bool MacroAssembler::is_li16u_at(address instr) {
return is_lui_at(instr) && // lui
is_srli_at(instr + instruction_size) && // srli
is_srli_at(instr + MacroAssembler::instruction_size) && // srli
check_li16u_data_dependency(instr);
}
bool MacroAssembler::is_li32_at(address instr) {
return is_lui_at(instr) && // lui
is_addiw_at(instr + instruction_size) && // addiw
is_addiw_at(instr + MacroAssembler::instruction_size) && // addiw
check_li32_data_dependency(instr);
}
@ -5110,7 +5110,7 @@ address MacroAssembler::emit_reloc_call_address_stub(int insts_call_instruction_
int MacroAssembler::max_reloc_call_address_stub_size() {
// Max stub size: alignment nop, target address.
return 1 * instruction_size + wordSize;
return 1 * MacroAssembler::instruction_size + wordSize;
}
int MacroAssembler::static_call_stub_size() {

View File

@ -1240,7 +1240,7 @@ public:
void far_jump(const Address &entry, Register tmp = t1);
static int far_branch_size() {
return 2 * 4; // auipc + jalr, see far_call() & far_jump()
return 2 * MacroAssembler::instruction_size; // auipc + jalr, see far_call() & far_jump()
}
void load_byte_map_base(Register reg);
@ -1644,9 +1644,9 @@ public:
public:
enum {
// movptr
movptr1_instruction_size = 6 * instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = 5 * instruction_size, // lui, lui, slli, add, addi. See movptr2().
load_pc_relative_instruction_size = 2 * instruction_size // auipc, ld
movptr1_instruction_size = 6 * MacroAssembler::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = 5 * MacroAssembler::instruction_size, // lui, lui, slli, add, addi. See movptr2().
load_pc_relative_instruction_size = 2 * MacroAssembler::instruction_size // auipc, ld
};
static bool is_load_pc_relative_at(address branch);
@ -1701,11 +1701,11 @@ public:
// addi/jalr/load
static bool check_movptr1_data_dependency(address instr) {
address lui = instr;
address addi1 = lui + instruction_size;
address slli1 = addi1 + instruction_size;
address addi2 = slli1 + instruction_size;
address slli2 = addi2 + instruction_size;
address last_instr = slli2 + instruction_size;
address addi1 = lui + MacroAssembler::instruction_size;
address slli1 = addi1 + MacroAssembler::instruction_size;
address addi2 = slli1 + MacroAssembler::instruction_size;
address slli2 = addi2 + MacroAssembler::instruction_size;
address last_instr = slli2 + MacroAssembler::instruction_size;
return extract_rs1(addi1) == extract_rd(lui) &&
extract_rs1(addi1) == extract_rd(addi1) &&
extract_rs1(slli1) == extract_rd(addi1) &&
@ -1725,10 +1725,10 @@ public:
// addi/jalr/load
static bool check_movptr2_data_dependency(address instr) {
address lui1 = instr;
address lui2 = lui1 + instruction_size;
address slli = lui2 + instruction_size;
address add = slli + instruction_size;
address last_instr = add + instruction_size;
address lui2 = lui1 + MacroAssembler::instruction_size;
address slli = lui2 + MacroAssembler::instruction_size;
address add = slli + MacroAssembler::instruction_size;
address last_instr = add + MacroAssembler::instruction_size;
return extract_rd(add) == extract_rd(lui2) &&
extract_rs1(add) == extract_rd(lui2) &&
extract_rs2(add) == extract_rd(slli) &&
@ -1742,7 +1742,7 @@ public:
// srli
static bool check_li16u_data_dependency(address instr) {
address lui = instr;
address srli = lui + instruction_size;
address srli = lui + MacroAssembler::instruction_size;
return extract_rs1(srli) == extract_rd(lui) &&
extract_rs1(srli) == extract_rd(srli);
@ -1753,7 +1753,7 @@ public:
// addiw
static bool check_li32_data_dependency(address instr) {
address lui = instr;
address addiw = lui + instruction_size;
address addiw = lui + MacroAssembler::instruction_size;
return extract_rs1(addiw) == extract_rd(lui) &&
extract_rs1(addiw) == extract_rd(addiw);
@ -1764,7 +1764,7 @@ public:
// jalr/addi/load/float_load
static bool check_pc_relative_data_dependency(address instr) {
address auipc = instr;
address last_instr = auipc + instruction_size;
address last_instr = auipc + MacroAssembler::instruction_size;
return extract_rs1(last_instr) == extract_rd(auipc);
}
@ -1774,7 +1774,7 @@ public:
// load
static bool check_load_pc_relative_data_dependency(address instr) {
address auipc = instr;
address load = auipc + instruction_size;
address load = auipc + MacroAssembler::instruction_size;
return extract_rd(load) == extract_rd(auipc) &&
extract_rs1(load) == extract_rd(load);

View File

@ -46,44 +46,11 @@ bool NativeInstruction::is_call_at(address addr) {
}
//-----------------------------------------------------------------------------
// NativeFarCall
//
// Implements direct far calling loading an address from the stub section version of reloc call.
// NativeCall
class NativeFarCall: public NativeInstruction {
public:
enum RISCV_specific_constants {
return_address_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jalr
};
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(return_address_offset); }
address return_address() const { return addr_at(return_address_offset); }
address destination() const;
address reloc_destination();
void set_destination(address dest);
void verify();
void print();
bool set_destination_mt_safe(address dest);
bool reloc_set_destination(address dest);
private:
address stub_address();
static void set_stub_address_destination_at(address dest, address value);
static address stub_address_destination_at(address src);
public:
static NativeFarCall* at(address addr);
static bool is_at(address addr);
static bool is_call_before(address return_address);
};
address NativeFarCall::destination() const {
address NativeCall::destination() const {
address addr = instruction_address();
assert(NativeFarCall::is_at(addr), "unexpected code at call site");
assert(NativeCall::is_at(addr), "unexpected code at call site");
address destination = MacroAssembler::target_addr_for_insn(addr);
@ -96,44 +63,36 @@ address NativeFarCall::destination() const {
return stub_address_destination_at(destination);
}
address NativeFarCall::reloc_destination() {
address NativeCall::reloc_destination() {
address call_addr = instruction_address();
assert(NativeFarCall::is_at(call_addr), "unexpected code at call site");
assert(NativeCall::is_at(call_addr), "unexpected code at call site");
CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != nullptr, "Could not find the containing code blob");
address stub_addr = nullptr;
if (code->is_nmethod()) {
// TODO: Need to revisit this when porting the AOT features.
stub_addr = trampoline_stub_Relocation::get_trampoline_for(call_addr, code->as_nmethod());
}
if (stub_addr != nullptr) {
stub_addr = MacroAssembler::target_addr_for_insn(call_addr);
assert(stub_addr != nullptr, "Sanity");
}
return stub_addr;
}
void NativeFarCall::set_destination(address dest) {
address addr = instruction_address();
assert(NativeFarCall::is_at(addr), "unexpected code at call site");
Unimplemented();
void NativeCall::verify() {
assert(NativeCall::is_at(instruction_address()), "unexpected code at call site");
}
void NativeFarCall::verify() {
assert(NativeFarCall::is_at(instruction_address()), "unexpected code at call site");
void NativeCall::print() {
assert(NativeCall::is_at(instruction_address()), "unexpected code at call site");
tty->print_cr(PTR_FORMAT ": auipc,ld,jalr x1, offset/reg, ", p2i(instruction_address()));
}
void NativeFarCall::print() {
assert(NativeFarCall::is_at(instruction_address()), "unexpected code at call site");
tty->print_cr(PTR_FORMAT ": auipc,ld,jalr x1, offset/reg, ", p2i(addr_at(0)));
}
bool NativeFarCall::set_destination_mt_safe(address dest) {
assert(NativeFarCall::is_at(addr_at(0)), "unexpected code at call site");
bool NativeCall::set_destination_mt_safe(address dest) {
assert(NativeCall::is_at(instruction_address()), "unexpected code at call site");
assert((CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
CompiledICLocker::is_safe(addr_at(0)),
CompiledICLocker::is_safe(instruction_address()),
"concurrent code patching");
address stub_addr = stub_address();
@ -145,25 +104,25 @@ bool NativeFarCall::set_destination_mt_safe(address dest) {
return false;
}
bool NativeFarCall::reloc_set_destination(address dest) {
address call_addr = addr_at(0);
assert(NativeFarCall::is_at(call_addr), "unexpected code at call site");
bool NativeCall::reloc_set_destination(address dest) {
address call_addr = instruction_address();
assert(NativeCall::is_at(call_addr), "unexpected code at call site");
CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != nullptr, "Could not find the containing code blob");
address stub_addr = nullptr;
if (code->is_nmethod()) {
stub_addr = trampoline_stub_Relocation::get_trampoline_for(call_addr, code->as_nmethod());
}
if (stub_addr != nullptr) {
MacroAssembler::pd_patch_instruction_size(call_addr, stub_addr);
// TODO: Need to revisit this when porting the AOT features.
assert(dest != nullptr, "Sanity");
assert(dest == trampoline_stub_Relocation::get_trampoline_for(call_addr,
code->as_nmethod()), "Sanity");
MacroAssembler::pd_patch_instruction_size(call_addr, dest);
}
return true;
}
void NativeFarCall::set_stub_address_destination_at(address dest, address value) {
void NativeCall::set_stub_address_destination_at(address dest, address value) {
assert_cond(dest != nullptr);
assert_cond(value != nullptr);
@ -171,31 +130,24 @@ void NativeFarCall::set_stub_address_destination_at(address dest, address value)
OrderAccess::release();
}
address NativeFarCall::stub_address_destination_at(address src) {
address NativeCall::stub_address_destination_at(address src) {
assert_cond(src != nullptr);
address dest = (address)get_data64_at(src);
return dest;
}
address NativeFarCall::stub_address() {
address call_addr = addr_at(0);
address NativeCall::stub_address() {
address call_addr = instruction_address();
CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != nullptr, "Could not find the containing code blob");
address dest = MacroAssembler::pd_call_destination(call_addr);
address dest = MacroAssembler::target_addr_for_insn(call_addr);
assert(code->contains(dest), "Sanity");
return dest;
}
NativeFarCall* NativeFarCall::at(address addr) {
assert_cond(addr != nullptr);
assert(NativeFarCall::is_at(addr), "unexpected code at call site: %p", addr);
NativeFarCall* call = (NativeFarCall*)(addr);
return call;
}
bool NativeFarCall::is_at(address addr) {
bool NativeCall::is_at(address addr) {
assert_cond(addr != nullptr);
const int instr_size = NativeInstruction::instruction_size;
if (MacroAssembler::is_auipc_at(addr) &&
@ -211,59 +163,8 @@ bool NativeFarCall::is_at(address addr) {
return false;
}
bool NativeFarCall::is_call_before(address return_address) {
return NativeFarCall::is_at(return_address - return_address_offset);
}
//-----------------------------------------------------------------------------
// NativeCall
address NativeCall::instruction_address() const {
return NativeFarCall::at(addr_at(0))->instruction_address();
}
address NativeCall::next_instruction_address() const {
return NativeFarCall::at(addr_at(0))->next_instruction_address();
}
address NativeCall::return_address() const {
return NativeFarCall::at(addr_at(0))->return_address();
}
address NativeCall::destination() const {
return NativeFarCall::at(addr_at(0))->destination();
}
address NativeCall::reloc_destination() {
return NativeFarCall::at(addr_at(0))->reloc_destination();
}
void NativeCall::set_destination(address dest) {
NativeFarCall::at(addr_at(0))->set_destination(dest);
}
void NativeCall::verify() {
NativeFarCall::at(addr_at(0))->verify();;
}
void NativeCall::print() {
NativeFarCall::at(addr_at(0))->print();;
}
bool NativeCall::set_destination_mt_safe(address dest) {
return NativeFarCall::at(addr_at(0))->set_destination_mt_safe(dest);
}
bool NativeCall::reloc_set_destination(address dest) {
return NativeFarCall::at(addr_at(0))->reloc_set_destination(dest);
}
bool NativeCall::is_at(address addr) {
return NativeFarCall::is_at(addr);
}
bool NativeCall::is_call_before(address return_address) {
return NativeFarCall::is_call_before(return_address);
return NativeCall::is_at(return_address - NativeCall::instruction_size);
}
NativeCall* nativeCall_at(address addr) {
@ -276,7 +177,7 @@ NativeCall* nativeCall_at(address addr) {
NativeCall* nativeCall_before(address return_address) {
assert_cond(return_address != nullptr);
NativeCall* call = nullptr;
call = (NativeCall*)(return_address - NativeFarCall::return_address_offset);
call = (NativeCall*)(return_address - NativeCall::instruction_size);
DEBUG_ONLY(call->verify());
return call;
}

View File

@ -93,7 +93,6 @@ class NativeInstruction {
static uint64_t get_data64_at(address src) { return Bytes::get_native_u8(src); }
public:
inline friend NativeInstruction* nativeInstruction_at(address addr);
static bool maybe_cpool_ref(address instr) {
@ -111,6 +110,7 @@ NativeCall* nativeCall_before(address return_address);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).
// NativeCall is reloc call on RISC-V. See MacroAssembler::reloc_call.
class NativeCall: private NativeInstruction {
// private: when common code is using byte_size()
private:
@ -118,21 +118,21 @@ class NativeCall: private NativeInstruction {
// Use byte_size() as it can be changed in runtime
// Since instruction_size exists on NativeInstruction we need
// to overload and hide it.
instruction_size = 3 * Assembler::instruction_size // auipc + ld + jalr
instruction_size = 3 * NativeInstruction::instruction_size // auipc + ld + jalr
};
public:
public:
static int byte_size() {
return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
return NativeCall::instruction_size; // auipc + ld + jalr
}
// Creation
friend NativeCall* nativeCall_at(address addr);
friend NativeCall* nativeCall_before(address return_address);
address instruction_address() const;
address next_instruction_address() const;
address return_address() const;
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(NativeCall::instruction_size); }
address return_address() const { return addr_at(NativeCall::instruction_size); }
address destination() const;
address reloc_destination();
@ -140,12 +140,22 @@ class NativeCall: private NativeInstruction {
void verify();
void print();
void set_destination(address dest);
void set_destination(address dest) { Unimplemented(); }
// patch stub to target address of the reloc call
bool set_destination_mt_safe(address dest);
// patch reloc call to stub address
bool reloc_set_destination(address dest);
static bool is_at(address addr);
static bool is_call_before(address return_address);
private:
// return stub address, without checking stub address in locs
address stub_address();
// set target address at stub
static void set_stub_address_destination_at(address dest, address value);
// return target address at stub
static address stub_address_destination_at(address src);
};
// An interface for accessing/manipulating native mov reg, imm instructions.

View File

@ -2301,12 +2301,15 @@ class StubGenerator: public StubCodeGenerator {
address entry_jlong_arraycopy = nullptr;
address entry_checkcast_arraycopy = nullptr;
// generate the common exit first so later stubs can rely on it if
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
generate_copy_longs(StubId::stubgen_copy_byte_f_id, copy_f, c_rarg0, c_rarg1, t1);
generate_copy_longs(StubId::stubgen_copy_byte_b_id, copy_b, c_rarg0, c_rarg1, t1);
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
StubRoutines::riscv::_zero_blocks = generate_zero_blocks();
//*** jbyte
@ -6686,8 +6689,6 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_catch_exception_entry = generate_catch_exception();
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::riscv::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}

View File

@ -52,6 +52,10 @@ bool StubRoutines::riscv::_completed = false;
/**
* crc_table[] from jdk/src/java.base/share/native/libzip/zlib/crc32.h
*/
address StubRoutines::crc_table_addr() { return (address)StubRoutines::riscv::_crc_table; }
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }
ATTRIBUTE_ALIGNED(4096) juint StubRoutines::riscv::_crc_table[] =
{
// Table 0

View File

@ -48,6 +48,7 @@ enum platform_dependent_constants {
class riscv {
friend class StubGenerator;
friend class StubRoutines;
#if INCLUDE_JVMCI
friend class JVMCIVMStructs;
#endif

View File

@ -1576,12 +1576,14 @@ class StubGenerator: public StubCodeGenerator {
void generate_arraycopy_stubs() {
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_jint_disjoint_arraycopy_id);
@ -3308,12 +3310,10 @@ class StubGenerator: public StubCodeGenerator {
}
if (UseCRC32Intrinsics) {
StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes();
}
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes();
}

View File

@ -78,14 +78,17 @@ void StubRoutines::zarch::generate_load_absolute_address(MacroAssembler* masm, R
#endif
}
// zarch: both CRC tables are statically initialized; return their addresses.
address StubRoutines::crc_table_addr() { return (address)StubRoutines::zarch::_crc_table; }
address StubRoutines::crc32c_table_addr() { return (address)StubRoutines::zarch::_crc32c_table; }
void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
const uint64_t table_contents = 0x77073096UL; // required contents of table[1]
generate_load_absolute_address(masm, table, StubRoutines::_crc_table_adr, table_contents);
generate_load_absolute_address(masm, table, StubRoutines::crc_table_addr(), table_contents);
}
void StubRoutines::zarch::generate_load_crc32c_table_addr(MacroAssembler* masm, Register table) {
const uint64_t table_contents = 0xf26b8303UL; // required contents of table[1]
generate_load_absolute_address(masm, table, StubRoutines::_crc32c_table_addr, table_contents);
generate_load_absolute_address(masm, table, StubRoutines::crc32c_table_addr(), table_contents);
}

View File

@ -62,6 +62,7 @@ enum method_handles_platform_dependent_constants {
class zarch {
friend class StubGenerator;
friend class StubRoutines;
public:
enum { nof_instance_allocators = 10 };

View File

@ -1239,6 +1239,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_sin : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); break;
case Interpreter::java_lang_math_cos : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); break;
case Interpreter::java_lang_math_tan : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); break;
case Interpreter::java_lang_math_sinh : /* run interpreted */ break;
case Interpreter::java_lang_math_tanh : /* run interpreted */ break;
case Interpreter::java_lang_math_cbrt : /* run interpreted */ break;
case Interpreter::java_lang_math_abs : /* run interpreted */ break;

View File

@ -720,8 +720,8 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
x->id() == vmIntrinsics::_dlog10 || x->id() == vmIntrinsics::_dtanh ||
x->id() == vmIntrinsics::_dcbrt
x->id() == vmIntrinsics::_dlog10 || x->id() == vmIntrinsics::_dsinh ||
x->id() == vmIntrinsics::_dtanh || x->id() == vmIntrinsics::_dcbrt
) {
do_LibmIntrinsic(x);
return;
@ -835,6 +835,12 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dsinh:
assert(StubRoutines::dsinh() != nullptr, "sinh intrinsic not found");
if (StubRoutines::dsinh() != nullptr) {
__ call_runtime_leaf(StubRoutines::dsinh(), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dtanh:
assert(StubRoutines::dtanh() != nullptr, "tanh intrinsic not found");
if (StubRoutines::dtanh() != nullptr) {

View File

@ -37,7 +37,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \
do_arch_blob(initial, PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \
do_stub(initial, verify_mxcsr) \
do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \
verify_mxcsr_entry) \

View File

@ -3689,6 +3689,9 @@ void StubGenerator::generate_libm_stubs() {
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
StubRoutines::_dtan = generate_libmTan(); // from stubGenerator_x86_64_tan.cpp
}
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsinh)) {
StubRoutines::_dsinh = generate_libmSinh(); // from stubGenerator_x86_64_sinh.cpp
}
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtanh)) {
StubRoutines::_dtanh = generate_libmTanh(); // from stubGenerator_x86_64_tanh.cpp
}
@ -4095,15 +4098,11 @@ void StubGenerator::generate_initial_stubs() {
StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000);
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}
if (UseCRC32CIntrinsics) {
bool supports_clmul = VM_Version::supports_clmul();
StubRoutines::x86::generate_CRC32C_table(supports_clmul);
StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
}

View File

@ -555,6 +555,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_libmSin();
address generate_libmCos();
address generate_libmTan();
address generate_libmSinh();
address generate_libmTanh();
address generate_libmCbrt();
address generate_libmExp();

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Copyright (C) 2021, Tencent. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Copyright (C) 2021, Tencent. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Copyright (C) 2021, Tencent. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

View File

@ -0,0 +1,525 @@
/*
* Copyright (c) 2025, Intel Corporation. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "macroAssembler_x86.hpp"
#include "stubGenerator_x86_64.hpp"
/******************************************************************************/
// ALGORITHM DESCRIPTION
// ---------------------
//
// sinh(x)=(exp(x)-exp(-x))/2
//
// Let |x|=xH+xL (upper 26 bits, lower 27 bits)
// log2(e) rounded to 26 bits (high part) plus a double precision low part is
// L2EH+L2EL (upper 26, lower 53 bits)
//
// Let xH*L2EH=k+f+r`, where (k+f)*2^7=int(xH*L2EH*2^7),
// f=0.b1 b2 ... b7, k integer
// 2^f is approximated as Tp[f]+Dp[f], and 2^{-f} as Tn[f]+Dn[f]
// Tp stores the high 53 bits, Dp stores (2^f-Tp[f]) rounded to double precision
//
// e^|x|=2^{k+f}*2^r, r=r`+xL*L2EH+|x|*L2EL, |r|<2^{-8}+2^{-14},
// for |x| in [23/64,3*2^7)
// e^{-|x|}=2^{-k-f}*2^{-r}
//
// e^|x| is approximated as 2^k*Tp+2^k*Tp*c1*r(1+c2*r+..+c5*r^4)+2^k*Dp=
// =2^k*Tp+2^k*Tp*P15+2^k*Dp
// e^{-|x|} approximated as 2^{-k}*Tn-2^{-k}*Tn*c1*r(1-c2*r+..+c5*r^4)+2^{-k}*Dn
//
// For |x| in [1/8, 3*2^7), sinh(x) is formed as
// RN(2^k*Tp-2^{-k}*Tn)+2^k*Tp*P15-2^{-k}*Tn*P`15-2^{-k}*TnL-2^{-k}*Dn+2^k*Dp
//
// For x in (3*2^7, 3*2^8), sign(x)*(e^|x|)/2 is returned, and
// the result is checked for overflow.
//
// For |x|<23/64, a Taylor polynomial expansion is used (degree 13)
// To reduce rounding errors, the p3*x^3 term is computed as
// (p3*xh^3)_high+[(p3*xl*(3*x*xh+xl^2))+(p3*xh^3)_low],
// where x=xh+xl, (xh are the leading 17 bits of x), and
// (p3*xh^3)_high=RN(x+p3*xh^3)-x
//
// Error bound:
// 0.51 ulp
//
// Special cases:
// sinh(NaN) = quiet NaN, and raise invalid exception
// sinh(+/-INF) = +/-INF
// sinh(+/-0) = +/-0
/******************************************************************************/
// Constant tables for the sinh stub. Names follow the algorithm description
// above; the values are Intel libm data and must not be edited.

// Mask selecting the leading bits of |x| (xH in the header comment); the
// remainder xL is recovered by subtraction. Also clears the sign bit.
ATTRIBUTE_ALIGNED(8) static const juint _HALFMASK[] =
{
0xF8000000UL, 0x7FFFFFFFUL
};
// Mask taking the leading bits of x (xh) for the split multiplication in the
// small-argument Taylor path (see header comment).
ATTRIBUTE_ALIGNED(16) static const juint _MASK3[] =
{
0x00000000UL, 0xFFFFFFF0UL, 0x00000000UL, 0xFFFFFFF0UL
};
// log2(e) split into a 26-bit high part (L2EH, first double) and a low
// correction (L2EL, second double) -- see header comment.
ATTRIBUTE_ALIGNED(16) static const juint _L2E[] =
{
0x60000000UL, 0x40671547UL, 0xF85DDF44UL, 0x3EC4AE0BUL
};
// Magic constants used to round a product to an integer via the
// add-then-subtract trick (see the addsd/subsd pair in the generator).
ATTRIBUTE_ALIGNED(16) static const juint _Shifter[] =
{
0x00000000UL, 0x43380000UL, 0x00000000UL, 0xC3380000UL
};
// Packed polynomial coefficients c1..c5 for the exp(r) expansion; each pair
// carries opposite signs so the e^r and e^{-r} terms evaluate together.
ATTRIBUTE_ALIGNED(16) static const juint _cv[] =
{
0xD704A0C0UL, 0x3E3C6B08UL, 0xD704A0C0UL, 0xBE3C6B08UL, 0xFEFA39EFUL,
0x3F662E42UL, 0xFEFA39EFUL, 0xBF662E42UL, 0x7F907D8BUL, 0x3D9F8445UL,
0x7F907D8BUL, 0x3D9F8445UL, 0xFFAC83B4UL, 0x3ED47FD3UL, 0xFFAC83B4UL,
0x3ED47FD3UL, 0xFEFA39EFUL, 0x3F762E42UL, 0xFEFA39EFUL, 0x3F762E42UL
};
// Taylor polynomial coefficients for the small-argument path (|x| < 23/64),
// degree 13 -- see header comment.
ATTRIBUTE_ALIGNED(16) static const juint _pv[] =
{
0x13A86D08UL, 0x3DE61246UL, 0xA556C732UL, 0x3EC71DE3UL, 0x11111111UL,
0x3F811111UL, 0x55555555UL, 0x3FC55555UL, 0x67F544E1UL, 0x3E5AE645UL,
0x1A01A019UL, 0x3F2A01A0UL
};
// 2^f table for the 128 fraction values f = j/128: each entry is the pair
// (Tp[f], Dp[f]) where Tp holds the high bits of 2^f and Dp the rounding
// remainder -- see header comment. Indexed by the 7 fraction bits of k+f.
ATTRIBUTE_ALIGNED(16) static const juint _T2f[] =
{
0x00000000UL, 0x3FF00000UL, 0x00000000UL, 0x00000000UL, 0xA9FB3335UL, 0x3FF0163DUL,
0x9AB8CDB7UL, 0x3C9B6129UL, 0x3E778061UL, 0x3FF02C9AUL, 0x535B085DUL, 0xBC719083UL,
0xE86E7F85UL, 0x3FF04315UL, 0x1977C96EUL, 0xBC90A31CUL, 0xD3158574UL, 0x3FF059B0UL,
0xA475B465UL, 0x3C8D73E2UL, 0x29DDF6DEUL, 0x3FF0706BUL, 0xE2B13C27UL, 0xBC8C91DFUL,
0x18759BC8UL, 0x3FF08745UL, 0x4BB284FFUL, 0x3C6186BEUL, 0xCAC6F383UL, 0x3FF09E3EUL,
0x18316136UL, 0x3C914878UL, 0x6CF9890FUL, 0x3FF0B558UL, 0x4ADC610BUL, 0x3C98A62EUL,
0x2B7247F7UL, 0x3FF0CC92UL, 0x16E24F71UL, 0x3C901EDCUL, 0x32D3D1A2UL, 0x3FF0E3ECUL,
0x27C57B52UL, 0x3C403A17UL, 0xAFFED31BUL, 0x3FF0FB66UL, 0xC44EBD7BUL, 0xBC6B9BEDUL,
0xD0125B51UL, 0x3FF11301UL, 0x39449B3AUL, 0xBC96C510UL, 0xC06C31CCUL, 0x3FF12ABDUL,
0xB36CA5C7UL, 0xBC51B514UL, 0xAEA92DE0UL, 0x3FF1429AUL, 0x9AF1369EUL, 0xBC932FBFUL,
0xC8A58E51UL, 0x3FF15A98UL, 0xB9EEAB0AUL, 0x3C82406AUL, 0x3C7D517BUL, 0x3FF172B8UL,
0xB9D78A76UL, 0xBC819041UL, 0x388C8DEAUL, 0x3FF18AF9UL, 0xD1970F6CUL, 0xBC911023UL,
0xEB6FCB75UL, 0x3FF1A35BUL, 0x7B4968E4UL, 0x3C8E5B4CUL, 0x84045CD4UL, 0x3FF1BBE0UL,
0x352EF607UL, 0xBC995386UL, 0x3168B9AAUL, 0x3FF1D487UL, 0x00A2643CUL, 0x3C9E016EUL,
0x22FCD91DUL, 0x3FF1ED50UL, 0x027BB78CUL, 0xBC91DF98UL, 0x88628CD6UL, 0x3FF2063BUL,
0x814A8495UL, 0x3C8DC775UL, 0x917DDC96UL, 0x3FF21F49UL, 0x9494A5EEUL, 0x3C82A97EUL,
0x6E756238UL, 0x3FF2387AUL, 0xB6C70573UL, 0x3C99B07EUL, 0x4FB2A63FUL, 0x3FF251CEUL,
0xBEF4F4A4UL, 0x3C8AC155UL, 0x65E27CDDUL, 0x3FF26B45UL, 0x9940E9D9UL, 0x3C82BD33UL,
0xE1F56381UL, 0x3FF284DFUL, 0x8C3F0D7EUL, 0xBC9A4C3AUL, 0xF51FDEE1UL, 0x3FF29E9DUL,
0xAFAD1255UL, 0x3C8612E8UL, 0xD0DAD990UL, 0x3FF2B87FUL, 0xD6381AA4UL, 0xBC410ADCUL,
0xA6E4030BUL, 0x3FF2D285UL, 0x54DB41D5UL, 0x3C900247UL, 0xA93E2F56UL, 0x3FF2ECAFUL,
0x45D52383UL, 0x3C71CA0FUL, 0x0A31B715UL, 0x3FF306FEUL, 0xD23182E4UL, 0x3C86F46AUL,
0xFC4CD831UL, 0x3FF32170UL, 0x8E18047CUL, 0x3C8A9CE7UL, 0xB26416FFUL, 0x3FF33C08UL,
0x843659A6UL, 0x3C932721UL, 0x5F929FF1UL, 0x3FF356C5UL, 0x5C4E4628UL, 0xBC8B5CEEUL,
0x373AA9CBUL, 0x3FF371A7UL, 0xBF42EAE2UL, 0xBC963AEAUL, 0x6D05D866UL, 0x3FF38CAEUL,
0x3C9904BDUL, 0xBC9E958DUL, 0x34E59FF7UL, 0x3FF3A7DBUL, 0xD661F5E3UL, 0xBC75E436UL,
0xC313A8E5UL, 0x3FF3C32DUL, 0x375D29C3UL, 0xBC9EFFF8UL, 0x4C123422UL, 0x3FF3DEA6UL,
0x11F09EBCUL, 0x3C8ADA09UL, 0x04AC801CUL, 0x3FF3FA45UL, 0xF956F9F3UL, 0xBC97D023UL,
0x21F72E2AUL, 0x3FF4160AUL, 0x1C309278UL, 0xBC5EF369UL, 0xD950A897UL, 0x3FF431F5UL,
0xE35F7999UL, 0xBC81C7DDUL, 0x6061892DUL, 0x3FF44E08UL, 0x04EF80D0UL, 0x3C489B7AUL,
0xED1D0057UL, 0x3FF46A41UL, 0xD1648A76UL, 0x3C9C944BUL, 0xB5C13CD0UL, 0x3FF486A2UL,
0xB69062F0UL, 0x3C73C1A3UL, 0xF0D7D3DEUL, 0x3FF4A32AUL, 0xF3D1BE56UL, 0x3C99CB62UL,
0xD5362A27UL, 0x3FF4BFDAUL, 0xAFEC42E2UL, 0x3C7D4397UL, 0x99FDDD0DUL, 0x3FF4DCB2UL,
0xBC6A7833UL, 0x3C98ECDBUL, 0x769D2CA7UL, 0x3FF4F9B2UL, 0xD25957E3UL, 0xBC94B309UL,
0xA2CF6642UL, 0x3FF516DAUL, 0x69BD93EFUL, 0xBC8F7685UL, 0x569D4F82UL, 0x3FF5342BUL,
0x1DB13CADUL, 0xBC807ABEUL, 0xCA5D920FUL, 0x3FF551A4UL, 0xEFEDE59BUL, 0xBC8D689CUL,
0x36B527DAUL, 0x3FF56F47UL, 0x011D93ADUL, 0x3C99BB2CUL, 0xD497C7FDUL, 0x3FF58D12UL,
0x5B9A1DE8UL, 0x3C8295E1UL, 0xDD485429UL, 0x3FF5AB07UL, 0x054647ADUL, 0x3C96324CUL,
0x8A5946B7UL, 0x3FF5C926UL, 0x816986A2UL, 0x3C3C4B1BUL, 0x15AD2148UL, 0x3FF5E76FUL,
0x3080E65EUL, 0x3C9BA6F9UL, 0xB976DC09UL, 0x3FF605E1UL, 0x9B56DE47UL, 0xBC93E242UL,
0xB03A5585UL, 0x3FF6247EUL, 0x7E40B497UL, 0xBC9383C1UL, 0x34CCC320UL, 0x3FF64346UL,
0x759D8933UL, 0xBC8C483CUL, 0x82552225UL, 0x3FF66238UL, 0x87591C34UL, 0xBC9BB609UL,
0xD44CA973UL, 0x3FF68155UL, 0x44F73E65UL, 0x3C6038AEUL, 0x667F3BCDUL, 0x3FF6A09EUL,
0x13B26456UL, 0xBC9BDD34UL, 0x750BDABFUL, 0x3FF6C012UL, 0x67FF0B0DUL, 0xBC728956UL,
0x3C651A2FUL, 0x3FF6DFB2UL, 0x683C88ABUL, 0xBC6BBE3AUL, 0xF9519484UL, 0x3FF6FF7DUL,
0x25860EF6UL, 0xBC883C0FUL, 0xE8EC5F74UL, 0x3FF71F75UL, 0x86887A99UL, 0xBC816E47UL,
0x48A58174UL, 0x3FF73F9AUL, 0x6C65D53CUL, 0xBC90A8D9UL, 0x564267C9UL, 0x3FF75FEBUL,
0x57316DD3UL, 0xBC902459UL, 0x4FDE5D3FUL, 0x3FF78069UL, 0x0A02162DUL, 0x3C9866B8UL,
0x73EB0187UL, 0x3FF7A114UL, 0xEE04992FUL, 0xBC841577UL, 0x0130C132UL, 0x3FF7C1EDUL,
0xD1164DD6UL, 0x3C9F124CUL, 0x36CF4E62UL, 0x3FF7E2F3UL, 0xBA15797EUL, 0x3C705D02UL,
0x543E1A12UL, 0x3FF80427UL, 0x626D972BUL, 0xBC927C86UL, 0x994CCE13UL, 0x3FF82589UL,
0xD41532D8UL, 0xBC9D4C1DUL, 0x4623C7ADUL, 0x3FF8471AUL, 0xA341CDFBUL, 0xBC88D684UL,
0x9B4492EDUL, 0x3FF868D9UL, 0x9BD4F6BAUL, 0xBC9FC6F8UL, 0xD98A6699UL, 0x3FF88AC7UL,
0xF37CB53AUL, 0x3C9994C2UL, 0x422AA0DBUL, 0x3FF8ACE5UL, 0x56864B27UL, 0x3C96E9F1UL,
0x16B5448CUL, 0x3FF8CF32UL, 0x32E9E3AAUL, 0xBC70D55EUL, 0x99157736UL, 0x3FF8F1AEUL,
0xA2E3976CUL, 0x3C85CC13UL, 0x0B91FFC6UL, 0x3FF9145BUL, 0x2E582524UL, 0xBC9DD679UL,
0xB0CDC5E5UL, 0x3FF93737UL, 0x81B57EBCUL, 0xBC675FC7UL, 0xCBC8520FUL, 0x3FF95A44UL,
0x96A5F039UL, 0xBC764B7CUL, 0x9FDE4E50UL, 0x3FF97D82UL, 0x7C1B85D1UL, 0xBC9D185BUL,
0x70CA07BAUL, 0x3FF9A0F1UL, 0x91CEE632UL, 0xBC9173BDUL, 0x82A3F090UL, 0x3FF9C491UL,
0xB071F2BEUL, 0x3C7C7C46UL, 0x19E32323UL, 0x3FF9E863UL, 0x78E64C6EUL, 0x3C7824CAUL,
0x7B5DE565UL, 0x3FFA0C66UL, 0x5D1CD533UL, 0xBC935949UL, 0xEC4A2D33UL, 0x3FFA309BUL,
0x7DDC36ABUL, 0x3C96305CUL, 0xB23E255DUL, 0x3FFA5503UL, 0xDB8D41E1UL, 0xBC9D2F6EUL,
0x1330B358UL, 0x3FFA799EUL, 0xCAC563C7UL, 0x3C9BCB7EUL, 0x5579FDBFUL, 0x3FFA9E6BUL,
0x0EF7FD31UL, 0x3C90FAC9UL, 0xBFD3F37AUL, 0x3FFAC36BUL, 0xCAE76CD0UL, 0xBC8F9234UL,
0x995AD3ADUL, 0x3FFAE89FUL, 0x345DCC81UL, 0x3C97A1CDUL, 0x298DB666UL, 0x3FFB0E07UL,
0x4C80E425UL, 0xBC9BDEF5UL, 0xB84F15FBUL, 0x3FFB33A2UL, 0x3084D708UL, 0xBC62805EUL,
0x8DE5593AUL, 0x3FFB5972UL, 0xBBBA6DE3UL, 0xBC9C71DFUL, 0xF2FB5E47UL, 0x3FFB7F76UL,
0x7E54AC3BUL, 0xBC75584FUL, 0x30A1064AUL, 0x3FFBA5B0UL, 0x0E54292EUL, 0xBC9EFCD3UL,
0x904BC1D2UL, 0x3FFBCC1EUL, 0x7A2D9E84UL, 0x3C823DD0UL, 0x5BD71E09UL, 0x3FFBF2C2UL,
0x3F6B9C73UL, 0xBC9EFDCAUL, 0xDD85529CUL, 0x3FFC199BUL, 0x895048DDUL, 0x3C811065UL,
0x5FFFD07AUL, 0x3FFC40ABUL, 0xE083C60AUL, 0x3C9B4537UL, 0x2E57D14BUL, 0x3FFC67F1UL,
0xFF483CADUL, 0x3C92884DUL, 0x9406E7B5UL, 0x3FFC8F6DUL, 0x48805C44UL, 0x3C71ACBCUL,
0xDCEF9069UL, 0x3FFCB720UL, 0xD1E949DCUL, 0x3C7503CBUL, 0x555DC3FAUL, 0x3FFCDF0BUL,
0x53829D72UL, 0xBC8DD83BUL, 0x4A07897CUL, 0x3FFD072DUL, 0x43797A9CUL, 0xBC9CBC37UL,
0x080D89F2UL, 0x3FFD2F87UL, 0x719D8578UL, 0xBC9D487BUL, 0xDCFBA487UL, 0x3FFD5818UL,
0xD75B3707UL, 0x3C82ED02UL, 0x16C98398UL, 0x3FFD80E3UL, 0x8BEDDFE8UL, 0xBC911EC1UL,
0x03DB3285UL, 0x3FFDA9E6UL, 0x696DB532UL, 0x3C9C2300UL, 0xF301B460UL, 0x3FFDD321UL,
0x78F018C3UL, 0x3C92DA57UL, 0x337B9B5FUL, 0x3FFDFC97UL, 0x4F184B5CUL, 0xBC91A5CDUL,
0x14F5A129UL, 0x3FFE2646UL, 0x817A1496UL, 0xBC97B627UL, 0xE78B3FF6UL, 0x3FFE502EUL,
0x80A9CC8FUL, 0x3C839E89UL, 0xFBC74C83UL, 0x3FFE7A51UL, 0xCA0C8DE2UL, 0x3C92D522UL,
0xA2A490DAUL, 0x3FFEA4AFUL, 0x179C2893UL, 0xBC9E9C23UL, 0x2D8E67F1UL, 0x3FFECF48UL,
0xB411AD8CUL, 0xBC9C93F3UL, 0xEE615A27UL, 0x3FFEFA1BUL, 0x86A4B6B0UL, 0x3C9DC7F4UL,
0x376BBA97UL, 0x3FFF252BUL, 0xBF0D8E43UL, 0x3C93A1A5UL, 0x5B6E4540UL, 0x3FFF5076UL,
0x2DD8A18BUL, 0x3C99D3E1UL, 0xAD9CBE14UL, 0x3FFF7BFDUL, 0xD006350AUL, 0xBC9DBB12UL,
0x819E90D8UL, 0x3FFFA7C1UL, 0xF3A5931EUL, 0x3C874853UL, 0x2B8F71F1UL, 0x3FFFD3C2UL,
0x966579E7UL, 0x3C62EB74UL
};
// 2^{-f} table, same layout as _T2f: (Tn[f], Dn[f]) pairs for the 128
// fraction values f = j/128 -- see header comment.
ATTRIBUTE_ALIGNED(16) static const juint _T2_neg_f[] =
{
0x00000000UL, 0x3FF00000UL, 0x00000000UL, 0x00000000UL, 0x2B8F71F1UL, 0x3FEFD3C2UL,
0x966579E7UL, 0x3C52EB74UL, 0x819E90D8UL, 0x3FEFA7C1UL, 0xF3A5931EUL, 0x3C774853UL,
0xAD9CBE14UL, 0x3FEF7BFDUL, 0xD006350AUL, 0xBC8DBB12UL, 0x5B6E4540UL, 0x3FEF5076UL,
0x2DD8A18BUL, 0x3C89D3E1UL, 0x376BBA97UL, 0x3FEF252BUL, 0xBF0D8E43UL, 0x3C83A1A5UL,
0xEE615A27UL, 0x3FEEFA1BUL, 0x86A4B6B0UL, 0x3C8DC7F4UL, 0x2D8E67F1UL, 0x3FEECF48UL,
0xB411AD8CUL, 0xBC8C93F3UL, 0xA2A490DAUL, 0x3FEEA4AFUL, 0x179C2893UL, 0xBC8E9C23UL,
0xFBC74C83UL, 0x3FEE7A51UL, 0xCA0C8DE2UL, 0x3C82D522UL, 0xE78B3FF6UL, 0x3FEE502EUL,
0x80A9CC8FUL, 0x3C739E89UL, 0x14F5A129UL, 0x3FEE2646UL, 0x817A1496UL, 0xBC87B627UL,
0x337B9B5FUL, 0x3FEDFC97UL, 0x4F184B5CUL, 0xBC81A5CDUL, 0xF301B460UL, 0x3FEDD321UL,
0x78F018C3UL, 0x3C82DA57UL, 0x03DB3285UL, 0x3FEDA9E6UL, 0x696DB532UL, 0x3C8C2300UL,
0x16C98398UL, 0x3FED80E3UL, 0x8BEDDFE8UL, 0xBC811EC1UL, 0xDCFBA487UL, 0x3FED5818UL,
0xD75B3707UL, 0x3C72ED02UL, 0x080D89F2UL, 0x3FED2F87UL, 0x719D8578UL, 0xBC8D487BUL,
0x4A07897CUL, 0x3FED072DUL, 0x43797A9CUL, 0xBC8CBC37UL, 0x555DC3FAUL, 0x3FECDF0BUL,
0x53829D72UL, 0xBC7DD83BUL, 0xDCEF9069UL, 0x3FECB720UL, 0xD1E949DCUL, 0x3C6503CBUL,
0x9406E7B5UL, 0x3FEC8F6DUL, 0x48805C44UL, 0x3C61ACBCUL, 0x2E57D14BUL, 0x3FEC67F1UL,
0xFF483CADUL, 0x3C82884DUL, 0x5FFFD07AUL, 0x3FEC40ABUL, 0xE083C60AUL, 0x3C8B4537UL,
0xDD85529CUL, 0x3FEC199BUL, 0x895048DDUL, 0x3C711065UL, 0x5BD71E09UL, 0x3FEBF2C2UL,
0x3F6B9C73UL, 0xBC8EFDCAUL, 0x904BC1D2UL, 0x3FEBCC1EUL, 0x7A2D9E84UL, 0x3C723DD0UL,
0x30A1064AUL, 0x3FEBA5B0UL, 0x0E54292EUL, 0xBC8EFCD3UL, 0xF2FB5E47UL, 0x3FEB7F76UL,
0x7E54AC3BUL, 0xBC65584FUL, 0x8DE5593AUL, 0x3FEB5972UL, 0xBBBA6DE3UL, 0xBC8C71DFUL,
0xB84F15FBUL, 0x3FEB33A2UL, 0x3084D708UL, 0xBC52805EUL, 0x298DB666UL, 0x3FEB0E07UL,
0x4C80E425UL, 0xBC8BDEF5UL, 0x995AD3ADUL, 0x3FEAE89FUL, 0x345DCC81UL, 0x3C87A1CDUL,
0xBFD3F37AUL, 0x3FEAC36BUL, 0xCAE76CD0UL, 0xBC7F9234UL, 0x5579FDBFUL, 0x3FEA9E6BUL,
0x0EF7FD31UL, 0x3C80FAC9UL, 0x1330B358UL, 0x3FEA799EUL, 0xCAC563C7UL, 0x3C8BCB7EUL,
0xB23E255DUL, 0x3FEA5503UL, 0xDB8D41E1UL, 0xBC8D2F6EUL, 0xEC4A2D33UL, 0x3FEA309BUL,
0x7DDC36ABUL, 0x3C86305CUL, 0x7B5DE565UL, 0x3FEA0C66UL, 0x5D1CD533UL, 0xBC835949UL,
0x19E32323UL, 0x3FE9E863UL, 0x78E64C6EUL, 0x3C6824CAUL, 0x82A3F090UL, 0x3FE9C491UL,
0xB071F2BEUL, 0x3C6C7C46UL, 0x70CA07BAUL, 0x3FE9A0F1UL, 0x91CEE632UL, 0xBC8173BDUL,
0x9FDE4E50UL, 0x3FE97D82UL, 0x7C1B85D1UL, 0xBC8D185BUL, 0xCBC8520FUL, 0x3FE95A44UL,
0x96A5F039UL, 0xBC664B7CUL, 0xB0CDC5E5UL, 0x3FE93737UL, 0x81B57EBCUL, 0xBC575FC7UL,
0x0B91FFC6UL, 0x3FE9145BUL, 0x2E582524UL, 0xBC8DD679UL, 0x99157736UL, 0x3FE8F1AEUL,
0xA2E3976CUL, 0x3C75CC13UL, 0x16B5448CUL, 0x3FE8CF32UL, 0x32E9E3AAUL, 0xBC60D55EUL,
0x422AA0DBUL, 0x3FE8ACE5UL, 0x56864B27UL, 0x3C86E9F1UL, 0xD98A6699UL, 0x3FE88AC7UL,
0xF37CB53AUL, 0x3C8994C2UL, 0x9B4492EDUL, 0x3FE868D9UL, 0x9BD4F6BAUL, 0xBC8FC6F8UL,
0x4623C7ADUL, 0x3FE8471AUL, 0xA341CDFBUL, 0xBC78D684UL, 0x994CCE13UL, 0x3FE82589UL,
0xD41532D8UL, 0xBC8D4C1DUL, 0x543E1A12UL, 0x3FE80427UL, 0x626D972BUL, 0xBC827C86UL,
0x36CF4E62UL, 0x3FE7E2F3UL, 0xBA15797EUL, 0x3C605D02UL, 0x0130C132UL, 0x3FE7C1EDUL,
0xD1164DD6UL, 0x3C8F124CUL, 0x73EB0187UL, 0x3FE7A114UL, 0xEE04992FUL, 0xBC741577UL,
0x4FDE5D3FUL, 0x3FE78069UL, 0x0A02162DUL, 0x3C8866B8UL, 0x564267C9UL, 0x3FE75FEBUL,
0x57316DD3UL, 0xBC802459UL, 0x48A58174UL, 0x3FE73F9AUL, 0x6C65D53CUL, 0xBC80A8D9UL,
0xE8EC5F74UL, 0x3FE71F75UL, 0x86887A99UL, 0xBC716E47UL, 0xF9519484UL, 0x3FE6FF7DUL,
0x25860EF6UL, 0xBC783C0FUL, 0x3C651A2FUL, 0x3FE6DFB2UL, 0x683C88ABUL, 0xBC5BBE3AUL,
0x750BDABFUL, 0x3FE6C012UL, 0x67FF0B0DUL, 0xBC628956UL, 0x667F3BCDUL, 0x3FE6A09EUL,
0x13B26456UL, 0xBC8BDD34UL, 0xD44CA973UL, 0x3FE68155UL, 0x44F73E65UL, 0x3C5038AEUL,
0x82552225UL, 0x3FE66238UL, 0x87591C34UL, 0xBC8BB609UL, 0x34CCC320UL, 0x3FE64346UL,
0x759D8933UL, 0xBC7C483CUL, 0xB03A5585UL, 0x3FE6247EUL, 0x7E40B497UL, 0xBC8383C1UL,
0xB976DC09UL, 0x3FE605E1UL, 0x9B56DE47UL, 0xBC83E242UL, 0x15AD2148UL, 0x3FE5E76FUL,
0x3080E65EUL, 0x3C8BA6F9UL, 0x8A5946B7UL, 0x3FE5C926UL, 0x816986A2UL, 0x3C2C4B1BUL,
0xDD485429UL, 0x3FE5AB07UL, 0x054647ADUL, 0x3C86324CUL, 0xD497C7FDUL, 0x3FE58D12UL,
0x5B9A1DE8UL, 0x3C7295E1UL, 0x36B527DAUL, 0x3FE56F47UL, 0x011D93ADUL, 0x3C89BB2CUL,
0xCA5D920FUL, 0x3FE551A4UL, 0xEFEDE59BUL, 0xBC7D689CUL, 0x569D4F82UL, 0x3FE5342BUL,
0x1DB13CADUL, 0xBC707ABEUL, 0xA2CF6642UL, 0x3FE516DAUL, 0x69BD93EFUL, 0xBC7F7685UL,
0x769D2CA7UL, 0x3FE4F9B2UL, 0xD25957E3UL, 0xBC84B309UL, 0x99FDDD0DUL, 0x3FE4DCB2UL,
0xBC6A7833UL, 0x3C88ECDBUL, 0xD5362A27UL, 0x3FE4BFDAUL, 0xAFEC42E2UL, 0x3C6D4397UL,
0xF0D7D3DEUL, 0x3FE4A32AUL, 0xF3D1BE56UL, 0x3C89CB62UL, 0xB5C13CD0UL, 0x3FE486A2UL,
0xB69062F0UL, 0x3C63C1A3UL, 0xED1D0057UL, 0x3FE46A41UL, 0xD1648A76UL, 0x3C8C944BUL,
0x6061892DUL, 0x3FE44E08UL, 0x04EF80D0UL, 0x3C389B7AUL, 0xD950A897UL, 0x3FE431F5UL,
0xE35F7999UL, 0xBC71C7DDUL, 0x21F72E2AUL, 0x3FE4160AUL, 0x1C309278UL, 0xBC4EF369UL,
0x04AC801CUL, 0x3FE3FA45UL, 0xF956F9F3UL, 0xBC87D023UL, 0x4C123422UL, 0x3FE3DEA6UL,
0x11F09EBCUL, 0x3C7ADA09UL, 0xC313A8E5UL, 0x3FE3C32DUL, 0x375D29C3UL, 0xBC8EFFF8UL,
0x34E59FF7UL, 0x3FE3A7DBUL, 0xD661F5E3UL, 0xBC65E436UL, 0x6D05D866UL, 0x3FE38CAEUL,
0x3C9904BDUL, 0xBC8E958DUL, 0x373AA9CBUL, 0x3FE371A7UL, 0xBF42EAE2UL, 0xBC863AEAUL,
0x5F929FF1UL, 0x3FE356C5UL, 0x5C4E4628UL, 0xBC7B5CEEUL, 0xB26416FFUL, 0x3FE33C08UL,
0x843659A6UL, 0x3C832721UL, 0xFC4CD831UL, 0x3FE32170UL, 0x8E18047CUL, 0x3C7A9CE7UL,
0x0A31B715UL, 0x3FE306FEUL, 0xD23182E4UL, 0x3C76F46AUL, 0xA93E2F56UL, 0x3FE2ECAFUL,
0x45D52383UL, 0x3C61CA0FUL, 0xA6E4030BUL, 0x3FE2D285UL, 0x54DB41D5UL, 0x3C800247UL,
0xD0DAD990UL, 0x3FE2B87FUL, 0xD6381AA4UL, 0xBC310ADCUL, 0xF51FDEE1UL, 0x3FE29E9DUL,
0xAFAD1255UL, 0x3C7612E8UL, 0xE1F56381UL, 0x3FE284DFUL, 0x8C3F0D7EUL, 0xBC8A4C3AUL,
0x65E27CDDUL, 0x3FE26B45UL, 0x9940E9D9UL, 0x3C72BD33UL, 0x4FB2A63FUL, 0x3FE251CEUL,
0xBEF4F4A4UL, 0x3C7AC155UL, 0x6E756238UL, 0x3FE2387AUL, 0xB6C70573UL, 0x3C89B07EUL,
0x917DDC96UL, 0x3FE21F49UL, 0x9494A5EEUL, 0x3C72A97EUL, 0x88628CD6UL, 0x3FE2063BUL,
0x814A8495UL, 0x3C7DC775UL, 0x22FCD91DUL, 0x3FE1ED50UL, 0x027BB78CUL, 0xBC81DF98UL,
0x3168B9AAUL, 0x3FE1D487UL, 0x00A2643CUL, 0x3C8E016EUL, 0x84045CD4UL, 0x3FE1BBE0UL,
0x352EF607UL, 0xBC895386UL, 0xEB6FCB75UL, 0x3FE1A35BUL, 0x7B4968E4UL, 0x3C7E5B4CUL,
0x388C8DEAUL, 0x3FE18AF9UL, 0xD1970F6CUL, 0xBC811023UL, 0x3C7D517BUL, 0x3FE172B8UL,
0xB9D78A76UL, 0xBC719041UL, 0xC8A58E51UL, 0x3FE15A98UL, 0xB9EEAB0AUL, 0x3C72406AUL,
0xAEA92DE0UL, 0x3FE1429AUL, 0x9AF1369EUL, 0xBC832FBFUL, 0xC06C31CCUL, 0x3FE12ABDUL,
0xB36CA5C7UL, 0xBC41B514UL, 0xD0125B51UL, 0x3FE11301UL, 0x39449B3AUL, 0xBC86C510UL,
0xAFFED31BUL, 0x3FE0FB66UL, 0xC44EBD7BUL, 0xBC5B9BEDUL, 0x32D3D1A2UL, 0x3FE0E3ECUL,
0x27C57B52UL, 0x3C303A17UL, 0x2B7247F7UL, 0x3FE0CC92UL, 0x16E24F71UL, 0x3C801EDCUL,
0x6CF9890FUL, 0x3FE0B558UL, 0x4ADC610BUL, 0x3C88A62EUL, 0xCAC6F383UL, 0x3FE09E3EUL,
0x18316136UL, 0x3C814878UL, 0x18759BC8UL, 0x3FE08745UL, 0x4BB284FFUL, 0x3C5186BEUL,
0x29DDF6DEUL, 0x3FE0706BUL, 0xE2B13C27UL, 0xBC7C91DFUL, 0xD3158574UL, 0x3FE059B0UL,
0xA475B465UL, 0x3C7D73E2UL, 0xE86E7F85UL, 0x3FE04315UL, 0x1977C96EUL, 0xBC80A31CUL,
0x3E778061UL, 0x3FE02C9AUL, 0x535B085DUL, 0xBC619083UL, 0xA9FB3335UL, 0x3FE0163DUL,
0x9AB8CDB7UL, 0x3C8B6129UL
};
#define __ _masm->
// Generates the StubRoutines::dsinh() stub: computes sinh(x) for the double
// argument in xmm0 and returns the result in xmm0, using the table-driven
// Intel libm algorithm described in the block comment above. Clobbers
// rax, rcx, rdx, r8, r11 and xmm0-xmm7.
address StubGenerator::generate_libmSinh() {
StubId stub_id = StubId::stubgen_dsinh_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_3_0_2, L_2TAG_PACKET_4_0_2;
Label L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2;
Label B1_2, B1_5;
// Raw addresses of the constant tables defined above.
address HALFMASK = (address)_HALFMASK;
address MASK3 = (address)_MASK3;
address L2E = (address)_L2E;
address Shifter = (address)_Shifter;
address cv = (address)_cv;
address pv = (address)_pv;
address T2f = (address) _T2f;
address T2_neg_f = (address) _T2_neg_f;
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ bind(B1_2);
// xmm4 = |x| (andnpd with the sign-bit mask); xmm5 = |x| duplicated.
// rcx = top 16 bits of x with rdx keeping its sign bit and rcx its
// magnitude bits, biased so the range compare below can classify x.
__ xorpd(xmm4, xmm4);
__ movsd(xmm1, ExternalAddress(L2E), r11 /*rscratch*/);
__ movl(rax, 32768);
__ pinsrw(xmm4, rax, 3);
__ pextrw(rcx, xmm0, 3);
__ andnpd(xmm4, xmm0);
__ pshufd(xmm5, xmm4, 68);
__ movl(rdx, 32768);
__ andl(rdx, rcx);
__ andl(rcx, 32767);
__ subl(rcx, 16343);
__ cmpl(rcx, 177);
__ jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2); // Branch only if |x| is not in [23/64, 3*2^8)
// Main path: split |x| into xH + xL (HALFMASK) and reduce using
// xH*L2EH = k + f + r` (see header comment). rax ends up holding
// (k+f)*2^7 with x's sign folded in; xmm2 accumulates the reduced
// argument r; xmm7 keeps the rounding remainder from the Shifter trick.
__ movsd(xmm3, ExternalAddress(HALFMASK), r11 /*rscratch*/);
__ movsd(xmm2, ExternalAddress(L2E + 8), r11 /*rscratch*/);
__ movsd(xmm6, ExternalAddress(Shifter), r11 /*rscratch*/);
__ andpd(xmm3, xmm0);
__ subsd(xmm4, xmm3);
__ mulsd(xmm3, xmm1);
__ mulsd(xmm2, xmm5);
__ cvtsd2siq(rax, xmm3);
__ shll(rdx, 3);
__ orl(rax, rdx);
__ movq(xmm7, xmm3);
__ addsd(xmm3, xmm6);
__ mulsd(xmm1, xmm4);
__ xorpd(xmm5, xmm5);
__ subsd(xmm3, xmm6);
__ movapd(xmm4, ExternalAddress(cv), r11 /*rscratch*/);
__ addsd(xmm2, xmm1);
__ movapd(xmm6, ExternalAddress(cv + 16), r11 /*rscratch*/);
__ subsd(xmm7, xmm3);
__ movl(rdx, 32704);
__ pinsrw(xmm5, rdx, 3);
__ movapd(xmm1, ExternalAddress(cv + 32), r11 /*rscratch*/);
__ addsd(xmm2, xmm7);
// rdx = 2*f indexes the 128-entry (T, D) tables; rax becomes the biased
// exponent word of the 2^k scale factor.
__ movl(rdx, 127);
__ andl(rdx, rax);
__ addl(rdx, rdx);
__ shrl(rax, 3);
__ andl(rax, 65520);
__ addl(rax, 16352);
__ xorpd(xmm0, xmm0);
__ cmpl(rcx, 161);
__ jcc(Assembler::aboveEqual, L_2TAG_PACKET_1_0_2); // Branch only if |x| is not in [23/64, 3*2^7)
// |x| in [23/64, 3*2^7): evaluate 2^k*Tp[f] (xmm0) and 2^{-k}*Tn[f]
// (xmm5) and combine both exponential halves packed, per the formula
// RN(2^k*Tp - 2^{-k}*Tn) + correction terms in the header comment.
__ pshufd(xmm5, xmm5, 68);
__ pinsrw(xmm0, rax, 3);
__ pshufd(xmm0, xmm0, 68);
__ psubw(xmm5, xmm0);
__ lea(r8, ExternalAddress(T2f));
__ mulpd(xmm0, Address(r8, rdx, Address::times_8));
__ lea(r8, ExternalAddress(T2_neg_f));
__ mulpd(xmm5, Address(r8, rdx, Address::times_8));
__ pshufd(xmm3, xmm2, 68);
__ movapd(xmm7, ExternalAddress(cv + 48), r11 /*rscratch*/);
__ pshufd(xmm2, xmm2, 68);
__ mulpd(xmm3, xmm3);
__ mulpd(xmm4, xmm2);
__ mulpd(xmm6, xmm2);
__ mulpd(xmm2, ExternalAddress(cv + 64), r11 /*rscratch*/);
__ mulpd(xmm1, xmm3);
__ mulpd(xmm7, xmm3);
__ mulpd(xmm4, xmm3);
__ mulpd(xmm1, xmm3);
__ addpd(xmm6, xmm7);
__ movq(xmm7, xmm0);
__ addpd(xmm4, xmm1);
__ shufpd(xmm7, xmm5, 0);
__ subpd(xmm0, xmm5);
__ mulpd(xmm2, xmm7);
__ addpd(xmm4, xmm6);
__ subsd(xmm7, xmm0);
__ mulpd(xmm4, xmm2);
__ pshufd(xmm6, xmm0, 238);
__ subsd(xmm7, xmm5);
__ addpd(xmm4, xmm2);
__ addsd(xmm7, xmm6);
__ pshufd(xmm2, xmm4, 238);
__ addsd(xmm2, xmm7);
__ addsd(xmm2, xmm4);
__ addsd(xmm0, xmm2);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_1_0_2);
// |x| in [3*2^7, 3*2^8): per the header comment only sign(x)*e^|x|/2
// contributes. Split the large exponent k across two scale factors
// (rax/rcx) so the intermediate 2^k does not overflow prematurely.
__ subl(rax, 16352);
__ movl(rcx, rax);
__ andl(rax, 32752);
__ shrl(rax, 1);
__ andl(rax, 65520);
__ subl(rcx, rax);
__ addl(rax, 16352);
__ pinsrw(xmm0, rax, 3);
__ pshufd(xmm0, xmm0, 68);
__ lea(r8, ExternalAddress(T2f));
__ mulpd(xmm0, Address(r8, rdx, Address::times_8));
__ pshufd(xmm3, xmm2, 68);
__ movsd(xmm7, ExternalAddress(cv + 48), r11 /*rscratch*/);
__ mulsd(xmm3, xmm3);
__ mulsd(xmm4, xmm2);
__ mulsd(xmm6, xmm2);
__ mulsd(xmm2, ExternalAddress(cv + 64), r11 /*rscratch*/);
__ mulsd(xmm1, xmm3);
__ mulsd(xmm7, xmm3);
__ mulsd(xmm4, xmm3);
__ addl(rcx, 16368);
__ pinsrw(xmm5, rcx, 3);
__ mulsd(xmm1, xmm3);
__ addsd(xmm6, xmm7);
__ addsd(xmm4, xmm1);
__ mulsd(xmm2, xmm0);
__ addsd(xmm4, xmm6);
__ mulsd(xmm4, xmm2);
__ pshufd(xmm6, xmm0, 238);
__ addsd(xmm4, xmm6);
__ addsd(xmm2, xmm4);
__ addsd(xmm0, xmm2);
__ mulsd(xmm0, xmm5);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_0_0_2);
// |x| outside [23/64, 3*2^8): restore rcx to the raw magnitude bits and
// classify the argument (small, tiny, overflow, or INF/NaN).
__ addl(rcx, 16343);
__ cmpl(rcx, 16343);
__ jcc(Assembler::above, L_2TAG_PACKET_3_0_2); // Branch only if |x| > 23/64
__ cmpl(rcx, 15856);
__ jcc(Assembler::below, L_2TAG_PACKET_4_0_2); // Branch only if |x| < 2^-32
// 2^-32 <= |x| < 23/64: degree-13 Taylor expansion using the xh/xl split
// described in the header comment (xh = leading bits of x via MASK3).
__ movapd(xmm1, ExternalAddress(pv), r11 /*rscratch*/);
__ pshufd(xmm6, xmm0, 68);
__ mulpd(xmm5, xmm5);
__ movapd(xmm2, ExternalAddress(pv + 16), r11 /*rscratch*/);
__ pshufd(xmm7, xmm0, 68);
__ movapd(xmm3, ExternalAddress(pv + 32), r11 /*rscratch*/);
__ pshufd(xmm4, xmm0, 68);
__ andpd(xmm6, ExternalAddress(MASK3), r11 /*rscratch*/);
__ mulpd(xmm1, xmm5);
__ mulsd(xmm2, xmm5);
__ subpd(xmm4, xmm6);
__ mulpd(xmm7, xmm5);
__ addpd(xmm1, xmm3);
__ pshufd(xmm3, xmm6, 68);
__ mulpd(xmm5, xmm5);
__ mulsd(xmm2, xmm7);
__ mulpd(xmm1, xmm7);
__ pshufd(xmm7, xmm0, 68);
__ mulsd(xmm6, xmm6);
__ addsd(xmm7, xmm7);
__ mulsd(xmm4, xmm4);
__ mulpd(xmm1, xmm5);
__ addsd(xmm7, xmm0);
__ mulsd(xmm6, xmm3);
__ mulsd(xmm7, xmm3);
__ pshufd(xmm3, xmm1, 238);
__ mulsd(xmm1, xmm5);
__ pshufd(xmm5, xmm4, 238);
__ addsd(xmm3, xmm2);
__ pshufd(xmm2, xmm2, 238);
__ addsd(xmm7, xmm4);
__ movq(xmm4, xmm0);
__ mulsd(xmm6, xmm2);
__ mulsd(xmm7, xmm5);
__ addsd(xmm0, xmm6);
__ mulsd(xmm7, xmm2);
__ subsd(xmm4, xmm0);
__ addsd(xmm1, xmm7);
__ addsd(xmm6, xmm4);
__ addsd(xmm1, xmm3);
__ addsd(xmm1, xmm6);
__ addsd(xmm0, xmm1);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_4_0_2);
__ cmpl(rcx, 16);
__ jcc(Assembler::aboveEqual, L_2TAG_PACKET_5_0_2); // Branch only if |x| is not denormalized
// Denormal x: sinh(x) == x to double precision; the x*x multiply's result
// is discarded -- presumably done only for its FP status-flag side effects;
// confirm against the libm reference.
__ movq(xmm1, xmm0);
__ mulsd(xmm1, xmm1);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_5_0_2);
// Tiny normal x (< 2^-32): compute (x + x*2^C) * 2^-C, which is ~x;
// NOTE(review): presumably this form exists to get the correct rounding/
// inexact behavior rather than returning x directly -- confirm.
__ xorpd(xmm2, xmm2);
__ movl(rcx, 17392);
__ pinsrw(xmm2, rcx, 3);
__ xorpd(xmm3, xmm3);
__ movl(rdx, 15344);
__ pinsrw(xmm3, rdx, 3);
__ mulsd(xmm2, xmm0);
__ addsd(xmm0, xmm2);
__ mulsd(xmm0, xmm3);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_3_0_2);
__ cmpl(rcx, 32752);
__ jcc(Assembler::aboveEqual, L_2TAG_PACKET_6_0_2); // Branch only if |x| is INF or NaN
// Finite |x| beyond the representable range: sinh overflows. Multiply two
// huge values carrying x's sign (rdx) so the hardware produces +/-INF.
__ xorpd(xmm0, xmm0);
__ movl(rax, 32736);
__ pinsrw(xmm0, rax, 3);
__ orl(rax, rdx);
__ pinsrw(xmm1, rax, 3);
__ mulsd(xmm0, xmm1);
__ jmp(B1_5);
__ bind(L_2TAG_PACKET_6_0_2);
// x is +/-INF or NaN: return x * |x|, which yields +/-INF for infinities
// and a quiet NaN for NaN inputs (see special cases above).
__ xorpd(xmm1, xmm1);
__ movl(rax, 32768);
__ pinsrw(xmm1, rax, 3);
__ andnpd(xmm1, xmm0);
__ mulsd(xmm0, xmm1);
__ bind(B1_5);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
}
#undef __

View File

@ -45,6 +45,17 @@ STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY
address StubRoutines::crc_table_addr() {
return (address)StubRoutines::x86::_crc_table;
}
address StubRoutines::crc32c_table_addr() {
if (StubRoutines::x86::_crc32c_table == nullptr) {
bool supports_clmul = VM_Version::supports_clmul();
StubRoutines::x86::generate_CRC32C_table(supports_clmul);
}
return (address)StubRoutines::x86::_crc32c_table;
}
address StubRoutines::x86::_k256_adr = nullptr;
address StubRoutines::x86::_k256_W_adr = nullptr;
address StubRoutines::x86::_k512_W_addr = nullptr;
@ -291,7 +302,7 @@ static uint32_t crc32c_f_pow_n(uint32_t n) {
return result;
}
juint *StubRoutines::x86::_crc32c_table;
juint* StubRoutines::x86::_crc32c_table = nullptr;
void StubRoutines::x86::generate_CRC32C_table(bool is_pclmulqdq_table_supported) {

View File

@ -44,6 +44,7 @@ enum platform_dependent_constants {
class x86 {
friend class StubGenerator;
friend class StubRoutines;
friend class VMStructs;
// declare fields for arch-specific entries

View File

@ -464,6 +464,13 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
}
} else if (kind == Interpreter::java_lang_math_sinh) {
if (StubRoutines::dsinh() != nullptr) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dsinh())));
} else {
return nullptr; // Fallback to default implementation
}
} else if (kind == Interpreter::java_lang_math_tanh) {
if (StubRoutines::dtanh() != nullptr) {
__ movdbl(xmm0, Address(rsp, wordSize));

View File

@ -38,6 +38,7 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/virtualizationSupport.hpp"
@ -1069,6 +1070,7 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) {
_has_intel_jcc_erratum = compute_has_intel_jcc_erratum();
FLAG_SET_ERGO(IntelJccErratumMitigation, _has_intel_jcc_erratum);
} else {
_has_intel_jcc_erratum = IntelJccErratumMitigation;
}
@ -1096,21 +1098,16 @@ void VM_Version::get_processor_features() {
}
}
char buf[2048];
size_t cpu_info_size = jio_snprintf(
buf, sizeof(buf),
"(%u cores per cpu, %u threads per core) family %d model %d stepping %d microcode 0x%x",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping, os::cpu_microcode_revision());
assert(cpu_info_size > 0, "not enough temporary space allocated");
stringStream ss(2048);
ss.print("(%u cores per cpu, %u threads per core) family %d model %d stepping %d microcode 0x%x",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping, os::cpu_microcode_revision());
ss.print(", ");
int features_offset = (int)ss.size();
insert_features_names(_features, ss);
insert_features_names(_features, buf + cpu_info_size, sizeof(buf) - cpu_info_size);
_cpu_info_string = os::strdup(buf);
_features_string = extract_features_string(_cpu_info_string,
strnlen(_cpu_info_string, sizeof(buf)),
cpu_info_size);
_cpu_info_string = ss.as_string(true);
_features_string = _cpu_info_string + features_offset;
// Use AES instructions if available.
if (supports_aes()) {
@ -3265,13 +3262,15 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
return true;
}
void VM_Version::insert_features_names(VM_Version::VM_Features features, char* buf, size_t buflen) {
for (int i = 0; i < MAX_CPU_FEATURES; i++) {
if (features.supports_feature((VM_Version::Feature_Flag)i)) {
int res = jio_snprintf(buf, buflen, ", %s", _features_names[i]);
assert(res > 0, "not enough temporary space allocated");
buf += res;
buflen -= res;
void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (_features.supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
}
i += 1;
}
}
return (const char*)nullptr;
}, ", ");
}

View File

@ -30,6 +30,8 @@
#include "utilities/macros.hpp"
#include "utilities/sizes.hpp"
class stringStream;
class VM_Version : public Abstract_VM_Version {
friend class VMStructs;
friend class JVMCIVMStructs;
@ -922,7 +924,7 @@ public:
static bool is_intel_tsc_synched_at_init();
static void insert_features_names(VM_Version::VM_Features features, char* buf, size_t buflen);
static void insert_features_names(VM_Version::VM_Features features, stringStream& ss);
// This checks if the JVM is potentially affected by an erratum on Intel CPUs (SKX102)
// that causes unpredictable behaviour when jcc crosses 64 byte boundaries. Its microcode

View File

@ -1831,7 +1831,10 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
}
break;
case Op_SelectFromTwoVector:
if (size_in_bits < 128 || (size_in_bits < 512 && !VM_Version::supports_avx512vl())) {
if (size_in_bits < 128) {
return false;
}
if ((size_in_bits < 512 && !VM_Version::supports_avx512vl())) {
return false;
}
if (bt == T_SHORT && !VM_Version::supports_avx512bw()) {

View File

@ -37,7 +37,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(initial, 0) \
do_arch_blob(initial, 32) \
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
@ -58,7 +58,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(final, 0) \
do_arch_blob(final, 32) \
#endif // CPU_ZERO_STUBDECLARATIONS_HPP

View File

@ -28,4 +28,5 @@
#include "runtime/javaThread.hpp"
#include "runtime/stubRoutines.hpp"
// zero has no arch-specific stubs nor any associated entries
address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; }
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }

View File

@ -2343,8 +2343,7 @@ int os::open(const char *path, int oflag, int mode) {
// specifically destined for a subprocess should have the
// close-on-exec flag set. If we don't set it, then careless 3rd
// party native code might fork and exec without closing all
// appropriate file descriptors (e.g. as we do in closeDescriptors in
// UNIXProcess.c), and this in turn might:
// appropriate file descriptors, and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or

View File

@ -233,10 +233,10 @@ public:
mach_msg_type_number_t num_out = TASK_VM_INFO_COUNT;
kern_return_t err = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)(&vm_info), &num_out);
if (err == KERN_SUCCESS) {
st->print_cr(" vsize: %llu (%llu%s)", vm_info.virtual_size, PROPERFMTARGS(vm_info.virtual_size));
st->print_cr(" rss: %llu (%llu%s)", vm_info.resident_size, PROPERFMTARGS(vm_info.resident_size));
st->print_cr(" peak rss: %llu (%llu%s)", vm_info.resident_size_peak, PROPERFMTARGS(vm_info.resident_size_peak));
st->print_cr(" page size: %d (%ld%s)", vm_info.page_size, PROPERFMTARGS((size_t)vm_info.page_size));
st->print_cr(" vsize: %llu (" PROPERFMT ")", vm_info.virtual_size, PROPERFMTARGS((size_t)vm_info.virtual_size));
st->print_cr(" rss: %llu (" PROPERFMT ")", vm_info.resident_size, PROPERFMTARGS((size_t)vm_info.resident_size));
st->print_cr(" peak rss: %llu (" PROPERFMT ")", vm_info.resident_size_peak, PROPERFMTARGS((size_t)vm_info.resident_size_peak));
st->print_cr(" page size: %d (" PROPERFMT ")", vm_info.page_size, PROPERFMTARGS((size_t)vm_info.page_size));
} else {
st->print_cr("error getting vm_info %d", err);
}

View File

@ -2251,8 +2251,7 @@ int os::open(const char *path, int oflag, int mode) {
// specifically destined for a subprocess should have the
// close-on-exec flag set. If we don't set it, then careless 3rd
// party native code might fork and exec without closing all
// appropriate file descriptors (e.g. as we do in closeDescriptors in
// UNIXProcess.c), and this in turn might:
// appropriate file descriptors, and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or

View File

@ -4872,9 +4872,8 @@ int os::open(const char *path, int oflag, int mode) {
// All file descriptors that are opened in the Java process and not
// specifically destined for a subprocess should have the close-on-exec
// flag set. If we don't set it, then careless 3rd party native code
// might fork and exec without closing all appropriate file descriptors
// (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
// turn might:
// might fork and exec without closing all appropriate file descriptors,
// and this in turn might:
//
// - cause end-of-file to fail to be detected on some file
// descriptors, resulting in mysterious hangs, or

View File

@ -713,7 +713,7 @@ bool os::get_host_name(char* buf, size_t buflen) {
}
#ifndef _LP64
// Helper, on 32bit, for os::has_allocatable_memory_limit
// Helper, on 32bit, for os::commit_memory_limit
static bool is_allocatable(size_t s) {
if (s < 2 * G) {
return true;
@ -731,31 +731,19 @@ static bool is_allocatable(size_t s) {
}
#endif // !_LP64
size_t os::commit_memory_limit() {
// On POSIX systems, the amount of memory that can be commmitted is limited
// by the size of the reservable memory.
size_t reserve_limit = reserve_memory_limit();
bool os::has_allocatable_memory_limit(size_t* limit) {
struct rlimit rlim;
int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
// if there was an error when calling getrlimit, assume that there is no limitation
// on virtual memory.
bool result;
if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
result = false;
} else {
*limit = (size_t)rlim.rlim_cur;
result = true;
}
#ifdef _LP64
return result;
return reserve_limit;
#else
// arbitrary virtual space limit for 32 bit Unices found by testing. If
// getrlimit above returned a limit, bound it with this limit. Otherwise
// directly use it.
const size_t max_virtual_limit = 3800*M;
if (result) {
*limit = MIN2(*limit, max_virtual_limit);
} else {
*limit = max_virtual_limit;
}
// Arbitrary max reserve limit for 32 bit Unices found by testing.
const size_t max_reserve_limit = 3800 * M;
// Bound the reserve limit with the arbitrary max.
size_t actual_limit = MIN2(reserve_limit, max_reserve_limit);
// bound by actually allocatable memory. The algorithm uses two bounds, an
// upper and a lower limit. The upper limit is the current highest amount of
@ -769,15 +757,15 @@ bool os::has_allocatable_memory_limit(size_t* limit) {
// the minimum amount of memory we care about allocating.
const size_t min_allocation_size = M;
size_t upper_limit = *limit;
size_t upper_limit = actual_limit;
// first check a few trivial cases
if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
*limit = upper_limit;
// The actual limit is allocatable, no need to do anything.
} else if (!is_allocatable(min_allocation_size)) {
// we found that not even min_allocation_size is allocatable. Return it
// anyway. There is no point to search for a better value any more.
*limit = min_allocation_size;
actual_limit = min_allocation_size;
} else {
// perform the binary search.
size_t lower_limit = min_allocation_size;
@ -790,12 +778,31 @@ bool os::has_allocatable_memory_limit(size_t* limit) {
upper_limit = temp_limit;
}
}
*limit = lower_limit;
actual_limit = lower_limit;
}
return true;
return actual_limit;
#endif
}
size_t os::reserve_memory_limit() {
struct rlimit rlim;
int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
// If there was an error calling getrlimit, conservatively assume no limit.
if (getrlimit_res != 0) {
return SIZE_MAX;
}
// If the current limit is not infinity, there is a limit.
if (rlim.rlim_cur != RLIM_INFINITY) {
return (size_t)rlim.rlim_cur;
}
// No limit
return SIZE_MAX;
}
void* os::get_default_process_handle() {
#ifdef __APPLE__
// MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY

View File

@ -897,13 +897,6 @@ size_t os::rss() {
return rss;
}
bool os::has_allocatable_memory_limit(size_t* limit) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
GlobalMemoryStatusEx(&ms);
*limit = (size_t)ms.ullAvailVirtual;
return true;
}
int os::active_processor_count() {
// User has overridden the number of active processors
@ -3303,6 +3296,51 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
return aligned_base;
}
size_t os::commit_memory_limit() {
BOOL is_in_job_object = false;
BOOL res = IsProcessInJob(GetCurrentProcess(), nullptr, &is_in_job_object);
if (!res) {
char buf[512];
size_t buf_len = os::lasterror(buf, sizeof(buf));
warning("Attempt to determine whether the process is running in a job failed for commit limit: %s", buf_len != 0 ? buf : "<unknown error>");
// Conservatively assume no limit when there was an error calling IsProcessInJob.
return SIZE_MAX;
}
if (!is_in_job_object) {
// Not limited by a Job Object
return SIZE_MAX;
}
JOBOBJECT_EXTENDED_LIMIT_INFORMATION jeli = {};
res = QueryInformationJobObject(nullptr, JobObjectExtendedLimitInformation, &jeli, sizeof(jeli), nullptr);
if (!res) {
char buf[512];
size_t buf_len = os::lasterror(buf, sizeof(buf));
warning("Attempt to query job object information failed for commit limit: %s", buf_len != 0 ? buf : "<unknown error>");
// Conservatively assume no limit when there was an error calling QueryInformationJobObject.
return SIZE_MAX;
}
if (jeli.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_PROCESS_MEMORY) {
return jeli.ProcessMemoryLimit;
}
if (jeli.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_JOB_MEMORY) {
return jeli.JobMemoryLimit;
}
// No limit
return SIZE_MAX;
}
size_t os::reserve_memory_limit() {
// Virtual address space cannot be limited on Windows.
return SIZE_MAX;
}
char* os::reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool exec) {
// exec can be ignored
return map_or_reserve_memory_aligned(size, alignment, -1/* file_desc */, mem_tag);

View File

@ -67,7 +67,8 @@ void VM_Version::get_os_cpu_info() {
// 2) ID_AA64PFR0_EL1 describes AdvSIMD always equals to FP field.
// See the Arm ARM, section "ID_AA64PFR0_EL1, AArch64 Processor Feature
// Register 0".
_features = CPU_FP | CPU_ASIMD;
set_feature(CPU_FP);
set_feature(CPU_ASIMD);
// All Apple-darwin Arm processors have AES, PMULL, SHA1 and SHA2.
// See https://github.com/apple-oss-distributions/xnu/blob/main/osfmk/arm/commpage/commpage.c#L412
@ -75,25 +76,28 @@ void VM_Version::get_os_cpu_info() {
// these four CPU features, e.g., "hw.optional.arm.FEAT_AES", but the
// corresponding string names are not available before xnu-8019 version.
// Hence, assertions are omitted considering backward compatibility.
_features |= CPU_AES | CPU_PMULL | CPU_SHA1 | CPU_SHA2;
set_feature(CPU_AES);
set_feature(CPU_PMULL);
set_feature(CPU_SHA1);
set_feature(CPU_SHA2);
if (cpu_has("hw.optional.armv8_crc32")) {
_features |= CPU_CRC32;
set_feature(CPU_CRC32);
}
if (cpu_has("hw.optional.arm.FEAT_LSE") ||
cpu_has("hw.optional.armv8_1_atomics")) {
_features |= CPU_LSE;
set_feature(CPU_LSE);
}
if (cpu_has("hw.optional.arm.FEAT_SHA512") ||
cpu_has("hw.optional.armv8_2_sha512")) {
_features |= CPU_SHA512;
set_feature(CPU_SHA512);
}
if (cpu_has("hw.optional.arm.FEAT_SHA3") ||
cpu_has("hw.optional.armv8_2_sha3")) {
_features |= CPU_SHA3;
set_feature(CPU_SHA3);
}
if (cpu_has("hw.optional.arm.FEAT_SB")) {
_features |= CPU_SB;
set_feature(CPU_SB);
}
int cache_line_size;

View File

@ -117,22 +117,22 @@ void VM_Version::get_os_cpu_info() {
uint64_t auxv = getauxval(AT_HWCAP);
uint64_t auxv2 = getauxval(AT_HWCAP2);
static_assert(CPU_FP == HWCAP_FP, "Flag CPU_FP must follow Linux HWCAP");
static_assert(CPU_ASIMD == HWCAP_ASIMD, "Flag CPU_ASIMD must follow Linux HWCAP");
static_assert(CPU_EVTSTRM == HWCAP_EVTSTRM, "Flag CPU_EVTSTRM must follow Linux HWCAP");
static_assert(CPU_AES == HWCAP_AES, "Flag CPU_AES must follow Linux HWCAP");
static_assert(CPU_PMULL == HWCAP_PMULL, "Flag CPU_PMULL must follow Linux HWCAP");
static_assert(CPU_SHA1 == HWCAP_SHA1, "Flag CPU_SHA1 must follow Linux HWCAP");
static_assert(CPU_SHA2 == HWCAP_SHA2, "Flag CPU_SHA2 must follow Linux HWCAP");
static_assert(CPU_CRC32 == HWCAP_CRC32, "Flag CPU_CRC32 must follow Linux HWCAP");
static_assert(CPU_LSE == HWCAP_ATOMICS, "Flag CPU_LSE must follow Linux HWCAP");
static_assert(CPU_DCPOP == HWCAP_DCPOP, "Flag CPU_DCPOP must follow Linux HWCAP");
static_assert(CPU_SHA3 == HWCAP_SHA3, "Flag CPU_SHA3 must follow Linux HWCAP");
static_assert(CPU_SHA512 == HWCAP_SHA512, "Flag CPU_SHA512 must follow Linux HWCAP");
static_assert(CPU_SVE == HWCAP_SVE, "Flag CPU_SVE must follow Linux HWCAP");
static_assert(CPU_PACA == HWCAP_PACA, "Flag CPU_PACA must follow Linux HWCAP");
static_assert(CPU_FPHP == HWCAP_FPHP, "Flag CPU_FPHP must follow Linux HWCAP");
static_assert(CPU_ASIMDHP == HWCAP_ASIMDHP, "Flag CPU_ASIMDHP must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_FP) == HWCAP_FP, "Flag CPU_FP must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_ASIMD) == HWCAP_ASIMD, "Flag CPU_ASIMD must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_EVTSTRM) == HWCAP_EVTSTRM, "Flag CPU_EVTSTRM must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_AES) == HWCAP_AES, "Flag CPU_AES must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_PMULL) == HWCAP_PMULL, "Flag CPU_PMULL must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_SHA1) == HWCAP_SHA1, "Flag CPU_SHA1 must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_SHA2) == HWCAP_SHA2, "Flag CPU_SHA2 must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_CRC32) == HWCAP_CRC32, "Flag CPU_CRC32 must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_LSE) == HWCAP_ATOMICS, "Flag CPU_LSE must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_DCPOP) == HWCAP_DCPOP, "Flag CPU_DCPOP must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_SHA3) == HWCAP_SHA3, "Flag CPU_SHA3 must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_SHA512) == HWCAP_SHA512, "Flag CPU_SHA512 must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_SVE) == HWCAP_SVE, "Flag CPU_SVE must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_PACA) == HWCAP_PACA, "Flag CPU_PACA must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_FPHP) == HWCAP_FPHP, "Flag CPU_FPHP must follow Linux HWCAP");
static_assert(BIT_MASK(CPU_ASIMDHP) == HWCAP_ASIMDHP, "Flag CPU_ASIMDHP must follow Linux HWCAP");
_features = auxv & (
HWCAP_FP |
HWCAP_ASIMD |
@ -152,8 +152,12 @@ void VM_Version::get_os_cpu_info() {
HWCAP_FPHP |
HWCAP_ASIMDHP);
if (auxv2 & HWCAP2_SVE2) _features |= CPU_SVE2;
if (auxv2 & HWCAP2_SVEBITPERM) _features |= CPU_SVEBITPERM;
if (auxv2 & HWCAP2_SVE2) {
set_feature(CPU_SVE2);
}
if (auxv2 & HWCAP2_SVEBITPERM) {
set_feature(CPU_SVEBITPERM);
}
uint64_t ctr_el0;
uint64_t dczid_el0;
@ -187,7 +191,7 @@ void VM_Version::get_os_cpu_info() {
_revision = v;
} else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
if (strstr(p+1, "dcpop")) {
guarantee(_features & CPU_DCPOP, "dcpop availability should be consistent");
guarantee(supports_feature(CPU_DCPOP), "dcpop availability should be consistent");
}
}
}

View File

@ -29,14 +29,13 @@
// Standard include file for ADLC parser
//
#include <ctype.h>
// standard library constants
#include <iostream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>
#include <sys/types.h>
/* Make sure that we have the intptr_t and uintptr_t definitions */
@ -86,14 +85,8 @@ typedef unsigned __int64 uintptr_t;
// ADLC components
#include "adlArena.hpp"
#include "opto/adlcVMDeps.hpp"
#include "filebuff.hpp"
#include "dict2.hpp"
#include "forms.hpp"
#include "formsopt.hpp"
#include "formssel.hpp"
#include "archDesc.hpp"
#include "adlparse.hpp"
#include "archDesc.hpp"
// globally define ArchDesc for convenience. Alternatively every form
// could have a backpointer to the AD but it's too complicated to pass

View File

@ -25,6 +25,11 @@
#ifndef SHARE_ADLC_ADLPARSE_HPP
#define SHARE_ADLC_ADLPARSE_HPP
#include "filebuff.hpp"
#include "forms.hpp"
#include "formsopt.hpp"
#include "formssel.hpp"
// ADLPARSE.HPP - Definitions for Architecture Description Language Parser
// Authors: Chris Vick and Mike Paleczny

View File

@ -25,6 +25,9 @@
#ifndef SHARE_ADLC_FORMS_HPP
#define SHARE_ADLC_FORMS_HPP
#include "dict2.hpp"
#include "opto/adlcVMDeps.hpp"
// FORMS.HPP - ADL Parser Generic and Utility Forms Classes
#define TRUE 1

View File

@ -166,6 +166,7 @@ bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
case vmIntrinsics::_dcos:
case vmIntrinsics::_dtan:
#if defined(AMD64)
case vmIntrinsics::_dsinh:
case vmIntrinsics::_dtanh:
case vmIntrinsics::_dcbrt:
#endif

View File

@ -3296,6 +3296,7 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
case vmIntrinsics::_dsin : // fall through
case vmIntrinsics::_dcos : // fall through
case vmIntrinsics::_dtan : // fall through
case vmIntrinsics::_dsinh : // fall through
case vmIntrinsics::_dtanh : // fall through
case vmIntrinsics::_dcbrt : // fall through
case vmIntrinsics::_dlog : // fall through

View File

@ -2865,6 +2865,7 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
case vmIntrinsics::_dsqrt: // fall through
case vmIntrinsics::_dsqrt_strict: // fall through
case vmIntrinsics::_dtan: // fall through
case vmIntrinsics::_dsinh: // fall through
case vmIntrinsics::_dtanh: // fall through
case vmIntrinsics::_dsin : // fall through
case vmIntrinsics::_dcos : // fall through

View File

@ -362,6 +362,7 @@ const char* Runtime1::name_for_address(address entry) {
FUNCTION_CASE(entry, StubRoutines::dsin());
FUNCTION_CASE(entry, StubRoutines::dcos());
FUNCTION_CASE(entry, StubRoutines::dtan());
FUNCTION_CASE(entry, StubRoutines::dsinh());
FUNCTION_CASE(entry, StubRoutines::dtanh());
FUNCTION_CASE(entry, StubRoutines::dcbrt());

View File

@ -63,6 +63,7 @@ class TypeArrayKlass;
// be AOT-initialized:
// - If we discover at least one instance of class X, then class X is AOT-initialized (** Note1).
// - If AOTClassInitializer::can_archive_initialized_mirror(X) is true, then X is AOT-initialized.
// This function checks for the @jdk.internal.vm.annotation.AOTSafeClassInitializer annotation.
// - For each AOT-initialized class, we scan all the static fields in its java mirror. This will in
// turn discover more Klasses and java heap objects.
// - The scanning continues until we reach a steady state.

View File

@ -37,67 +37,6 @@
DEBUG_ONLY(InstanceKlass* _aot_init_class = nullptr;)
// Detector for class names we wish to handle specially.
// It is either an exact string match or a string prefix match.
class AOTClassInitializer::AllowedSpec {
const char* _class_name;
bool _is_prefix;
int _len;
public:
AllowedSpec(const char* class_name, bool is_prefix = false)
: _class_name(class_name), _is_prefix(is_prefix)
{
_len = (class_name == nullptr) ? 0 : (int)strlen(class_name);
}
const char* class_name() { return _class_name; }
bool matches(Symbol* name, int len) {
assert(_class_name != nullptr, "caller resp.");
if (_is_prefix) {
return len >= _len && name->starts_with(_class_name);
} else {
return len == _len && name->equals(_class_name);
}
}
};
// Tell if ik has a name that matches one of the given specs.
bool AOTClassInitializer::is_allowed(AllowedSpec* specs, InstanceKlass* ik) {
Symbol* name = ik->name();
int len = name->utf8_length();
for (AllowedSpec* s = specs; s->class_name() != nullptr; s++) {
if (s->matches(name, len)) {
// If a type is included in the tables inside can_archive_initialized_mirror(), we require that
// - all super classes must be included
// - all super interfaces that have <clinit> must be included.
// This ensures that in the production run, we don't run the <clinit> of a supertype but skips
// ik's <clinit>.
if (ik->java_super() != nullptr) {
DEBUG_ONLY(ResourceMark rm);
assert(AOTClassInitializer::can_archive_initialized_mirror(ik->java_super()),
"super class %s of %s must be aot-initialized", ik->java_super()->external_name(),
ik->external_name());
}
Array<InstanceKlass*>* interfaces = ik->local_interfaces();
int len = interfaces->length();
for (int i = 0; i < len; i++) {
InstanceKlass* intf = interfaces->at(i);
if (intf->class_initializer() != nullptr) {
assert(AOTClassInitializer::can_archive_initialized_mirror(intf),
"super interface %s (which has <clinit>) of %s must be aot-initialized", intf->external_name(),
ik->external_name());
}
}
return true;
}
}
return false;
}
bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
assert(!ArchiveBuilder::is_active() || !ArchiveBuilder::current()->is_in_buffer_space(ik), "must be source klass");
if (!CDSConfig::is_initing_classes_at_dump_time()) {
@ -260,74 +199,19 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
// because of invokedynamic. They are few enough for now to be
// manually tracked. There may be more in the future.
// IS_PREFIX means that we match all class names that start with a
// prefix. Otherwise, it is an exact match, of just one class name.
const bool IS_PREFIX = true;
{
static AllowedSpec specs[] = {
if (ik == vmClasses::Object_klass()) {
// everybody's favorite super
{"java/lang/Object"},
{nullptr}
};
if (is_allowed(specs, ik)) {
return true;
}
}
if (CDSConfig::is_dumping_method_handles()) {
// This table was created with the help of CDSHeapVerifier.
// The minimal list of @AOTSafeClassInitializer was created with the help of CDSHeapVerifier.
// Also, some $Holder classes are needed. E.g., Invokers.<clinit> explicitly
// initializes Invokers$Holder. Since Invokers.<clinit> won't be executed
// at runtime, we need to make sure Invokers$Holder is also aot-inited.
//
// We hope we can reduce the size of this list over time, and move
// the responsibility for identifying such classes into the JDK
// code itself. See tracking RFE JDK-8342481.
static AllowedSpec indy_specs[] = {
{"java/lang/constant/ConstantDescs"},
{"java/lang/constant/DynamicConstantDesc"},
{"java/lang/invoke/BoundMethodHandle"},
{"java/lang/invoke/BoundMethodHandle$Specializer"},
{"java/lang/invoke/BoundMethodHandle$Species_", IS_PREFIX},
{"java/lang/invoke/ClassSpecializer"},
{"java/lang/invoke/ClassSpecializer$", IS_PREFIX},
{"java/lang/invoke/DelegatingMethodHandle"},
{"java/lang/invoke/DelegatingMethodHandle$Holder"}, // UNSAFE.ensureClassInitialized()
{"java/lang/invoke/DirectMethodHandle"},
{"java/lang/invoke/DirectMethodHandle$Constructor"},
{"java/lang/invoke/DirectMethodHandle$Holder"}, // UNSAFE.ensureClassInitialized()
{"java/lang/invoke/Invokers"},
{"java/lang/invoke/Invokers$Holder"}, // UNSAFE.ensureClassInitialized()
{"java/lang/invoke/LambdaForm"},
{"java/lang/invoke/LambdaForm$Holder"}, // UNSAFE.ensureClassInitialized()
{"java/lang/invoke/LambdaForm$NamedFunction"},
{"java/lang/invoke/LambdaMetafactory"},
{"java/lang/invoke/MethodHandle"},
{"java/lang/invoke/MethodHandles"},
{"java/lang/invoke/SimpleMethodHandle"},
{"java/lang/invoke/StringConcatFactory"},
{"java/lang/invoke/VarHandleGuards"},
{"java/util/Collections"},
{"java/util/stream/Collectors"},
{"jdk/internal/constant/ConstantUtils"},
{"jdk/internal/constant/PrimitiveClassDescImpl"},
{"jdk/internal/constant/ReferenceClassDescImpl"},
// Can't include this, as it will pull in MethodHandleStatics which has many environment
// dependencies (on system properties, etc).
// MethodHandleStatics is an example of a class that must NOT get the aot-init treatment,
// because of its strong reliance on (a) final fields which are (b) environmentally determined.
//{"java/lang/invoke/InvokerBytecodeGenerator"},
{nullptr}
};
if (is_allowed(indy_specs, ik)) {
return true;
}
if (ik->name()->starts_with("java/lang/invoke/MethodHandleImpl")) {
if (ik->has_aot_safe_initializer()) {
return true;
}
}
@ -341,17 +225,6 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
return false;
}
// TODO: currently we have a hard-coded list. We should turn this into
// an annotation: @jdk.internal.vm.annotation.RuntimeSetupRequired
// See JDK-8342481.
bool AOTClassInitializer::is_runtime_setup_required(InstanceKlass* ik) {
return ik == vmClasses::Class_klass() ||
ik == vmClasses::internal_Unsafe_klass() ||
ik == vmClasses::ConcurrentHashMap_klass() ||
ik == vmClasses::MethodHandleImpl_klass() ||
ik == vmClasses::Reference_klass();
}
void AOTClassInitializer::call_runtime_setup(JavaThread* current, InstanceKlass* ik) {
assert(ik->has_aot_initialized_mirror(), "sanity");
if (ik->is_runtime_setup_required()) {

View File

@ -31,15 +31,11 @@
class InstanceKlass;
class AOTClassInitializer : AllStatic {
class AllowedSpec;
static bool is_allowed(AllowedSpec* specs, InstanceKlass* ik);
public:
// Called by heapShared.cpp to see if src_ik->java_mirror() can be archived in
// the initialized state.
static bool can_archive_initialized_mirror(InstanceKlass* src_ik);
static bool is_runtime_setup_required(InstanceKlass* ik);
static void call_runtime_setup(JavaThread* current, InstanceKlass* ik);
// Support for regression testing. Available in debug builds only.

View File

@ -45,8 +45,8 @@
#include "utilities/formatBuffer.hpp"
#include "utilities/stringUtils.hpp"
#include <sys/stat.h>
#include <errno.h>
#include <sys/stat.h>
Array<ClassPathZipEntry*>* AOTClassLocationConfig::_dumptime_jar_files = nullptr;
AOTClassLocationConfig* AOTClassLocationConfig::_dumptime_instance = nullptr;

View File

@ -28,8 +28,8 @@
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

View File

@ -26,8 +26,8 @@
#define SHARE_CDS_AOTCONSTANTPOOLRESOLVER_HPP
#include "interpreter/bytecodes.hpp"
#include "memory/allStatic.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/handles.hpp"
#include "utilities/exceptions.hpp"

View File

@ -25,8 +25,8 @@
#ifndef SHARE_CDS_AOTLINKEDCLASSBULKLOADER_HPP
#define SHARE_CDS_AOTLINKEDCLASSBULKLOADER_HPP
#include "memory/allStatic.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "runtime/handles.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"

View File

@ -30,11 +30,11 @@
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
class BootstrapInfo;
class ReservedSpace;

View File

@ -25,8 +25,8 @@
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/heapShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "memory/resourceArea.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/oop.inline.hpp"

View File

@ -121,6 +121,7 @@
\
product(ccstr, AOTCacheOutput, nullptr, \
"Specifies the file name for writing the AOT cache") \
constraint(AOTCacheOutputConstraintFunc, AtParse) \
\
product(bool, AOTInvokeDynamicLinking, false, DIAGNOSTIC, \
"AOT-link JVM_CONSTANT_InvokeDynamic entries in cached " \

View File

@ -22,8 +22,8 @@
*
*/
#include "cds/archiveUtils.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "cds/metaspaceShared.hpp"
@ -34,8 +34,8 @@
#include "oops/instanceStackChunkKlass.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/trainingData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/trainingData.hpp"
#include "oops/typeArrayKlass.hpp"
#include "runtime/arguments.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -29,8 +29,8 @@
#include "cds/dumpTimeClassInfo.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/safepoint.hpp"

View File

@ -42,8 +42,8 @@
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"

View File

@ -55,9 +55,9 @@
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
@ -80,8 +80,8 @@
#include "gc/g1/g1HeapRegion.hpp"
#endif
# include <sys/stat.h>
# include <errno.h>
#include <errno.h>
#include <sys/stat.h>
#ifndef O_BINARY // if defined (Win32) use binary files.
#define O_BINARY 0 // otherwise do nothing.

View File

@ -505,9 +505,6 @@ bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) {
void HeapShared::copy_and_rescan_aot_inited_mirror(InstanceKlass* ik) {
ik->set_has_aot_initialized_mirror();
if (AOTClassInitializer::is_runtime_setup_required(ik)) {
ik->set_is_runtime_setup_required();
}
oop orig_mirror;
if (RegeneratedClasses::is_regenerated_object(ik)) {

View File

@ -26,6 +26,7 @@
#define SHARE_CDS_LAMBDAFORMINVOKERS_INLINE_HPP
#include "cds/lambdaFormInvokers.hpp"
#include "classfile/vmSymbols.hpp"
inline bool LambdaFormInvokers::may_be_regenerated_class(Symbol* name) {

View File

@ -62,8 +62,8 @@
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
@ -245,7 +245,7 @@ static bool shared_base_too_high(char* specified_base, char* aligned_base, size_
static char* compute_shared_base(size_t cds_max) {
char* specified_base = (char*)SharedBaseAddress;
size_t alignment = MetaspaceShared::core_region_alignment();
if (UseCompressedClassPointers) {
if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) {
alignment = MAX2(alignment, Metaspace::reserve_alignment());
}
@ -1603,8 +1603,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
// Set up compressed Klass pointer encoding: the encoding range must
// cover both archive and class space.
const address encoding_base = (address)mapped_base_address;
const address klass_range_start = encoding_base + prot_zone_size;
const address klass_range_start = (address)mapped_base_address;
const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
// The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
@ -1615,13 +1614,19 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
// mapping start (including protection zone), shift should be the shift used at archive generation time.
CompressedKlassPointers::initialize_for_given_encoding(
klass_range_start, klass_range_size,
encoding_base, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
klass_range_start, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
);
assert(CompressedKlassPointers::base() == klass_range_start, "must be");
} else {
// Let JVM freely choose encoding base and shift
CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
assert(CompressedKlassPointers::base() == nullptr ||
CompressedKlassPointers::base() == klass_range_start, "must be");
}
// Establish protection zone, but only if we need one
if (CompressedKlassPointers::base() == klass_range_start) {
CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size);
}
CompressedKlassPointers::establish_protection_zone(encoding_base, prot_zone_size);
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.

View File

@ -77,8 +77,8 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
@ -941,6 +941,8 @@ public:
_jdk_internal_ValueBased,
_java_lang_Deprecated,
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
_annotation_LIMIT
};
const Location _location;
@ -976,6 +978,8 @@ public:
void set_stable(bool stable) { set_annotation(_field_Stable); }
bool is_stable() const { return has_annotation(_field_Stable); }
bool has_aot_runtime_setup() const { return has_annotation(_method_AOTRuntimeSetup); }
};
// This class also doubles as a holder for metadata cleanup.
@ -1896,6 +1900,16 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
case VM_SYMBOL_ENUM_NAME(java_lang_Deprecated): {
return _java_lang_Deprecated;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_AOTSafeClassInitializer_signature): {
if (_location != _in_class) break; // only allow for classes
if (!privileged) break; // only allow in privileged code
return _jdk_internal_vm_annotation_AOTSafeClassInitializer;
}
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_AOTRuntimeSetup_signature): {
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_AOTRuntimeSetup;
}
default: {
break;
}
@ -1975,6 +1989,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
m->set_deprecated_for_removal();
}
}
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
}
#define MAX_ARGS_SIZE 255
@ -2661,6 +2678,13 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs,
if (is_hidden()) { // Mark methods in hidden classes as 'hidden'.
m->set_is_hidden();
}
if (parsed_annotations.has_aot_runtime_setup()) {
if (name != vmSymbols::runtimeSetup() || signature != vmSymbols::void_method_signature() ||
!access_flags.is_private() || !access_flags.is_static()) {
classfile_parse_error("@AOTRuntimeSetup method must be declared private static void runtimeSetup() for class %s", CHECK_NULL);
}
_has_aot_runtime_setup_method = true;
}
// Copy annotations
copy_method_annotations(m->constMethod(),
@ -3978,6 +4002,15 @@ void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
const jint lh = Klass::instance_layout_helper(ik->size_helper(), true);
ik->set_layout_helper(lh);
}
// Propagate the AOT runtimeSetup method discovery
if (_has_aot_runtime_setup_method) {
ik->set_is_runtime_setup_required();
if (log_is_enabled(Info, aot, init)) {
ResourceMark rm;
log_info(aot, init)("Found @AOTRuntimeSetup class %s", ik->external_name());
}
}
}
// utility methods for appending an array with check for duplicates
@ -5117,8 +5150,47 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
check_methods_for_intrinsics(ik, methods);
// Fill in field values obtained by parse_classfile_attributes
if (_parsed_annotations->has_any_annotations()) {
if (_parsed_annotations->has_any_annotations())
_parsed_annotations->apply_to(ik);
// AOT-related checks.
// Note we cannot check this in general due to instrumentation or module patching
if (CDSConfig::is_initing_classes_at_dump_time()) {
// Check the aot initialization safe status.
// @AOTSafeClassInitializer is used only to support ahead-of-time initialization of classes
// in the AOT assembly phase.
if (ik->has_aot_safe_initializer()) {
// If a type is included in the tables inside can_archive_initialized_mirror(), we require that
// - all super classes must be included
// - all super interfaces that have <clinit> must be included.
// This ensures that in the production run, we don't run the <clinit> of a supertype but skips
// ik's <clinit>.
if (_super_klass != nullptr) {
guarantee_property(_super_klass->has_aot_safe_initializer(),
"Missing @AOTSafeClassInitializer in superclass %s for class %s",
_super_klass->external_name(),
CHECK);
}
int len = _local_interfaces->length();
for (int i = 0; i < len; i++) {
InstanceKlass* intf = _local_interfaces->at(i);
guarantee_property(intf->class_initializer() == nullptr || intf->has_aot_safe_initializer(),
"Missing @AOTSafeClassInitializer in superinterface %s for class %s",
intf->external_name(),
CHECK);
}
if (log_is_enabled(Info, aot, init)) {
ResourceMark rm;
log_info(aot, init)("Found @AOTSafeClassInitializer class %s", ik->external_name());
}
} else {
// @AOTRuntimeSetup only meaningful in @AOTClassInitializer
guarantee_property(!ik->is_runtime_setup_required(),
"@AOTRuntimeSetup meaningless in non-@AOTSafeClassInitializer class %s",
CHECK);
}
}
apply_parsed_class_attributes(ik);
@ -5326,6 +5398,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_has_localvariable_table(false),
_has_final_method(false),
_has_contended_fields(false),
_has_aot_runtime_setup_method(false),
_has_finalizer(false),
_has_empty_finalizer(false),
_max_bootstrap_specifier_index(-1) {

View File

@ -192,6 +192,7 @@ class ClassFileParser {
bool _has_localvariable_table;
bool _has_final_method;
bool _has_contended_fields;
bool _has_aot_runtime_setup_method;
// precomputed flags
bool _has_finalizer;

View File

@ -80,8 +80,8 @@
#include "utilities/ostream.hpp"
#include "utilities/utf8.hpp"
#include <stdlib.h>
#include <ctype.h>
#include <stdlib.h>
// Entry point in java.dll for path canonicalization

View File

@ -35,8 +35,9 @@ class SerializeClosure;
class ClassLoaderDataShared : AllStatic {
static bool _full_module_graph_loaded;
static void ensure_module_entry_table_exists(oop class_loader);
CDS_JAVA_HEAP_ONLY(static void ensure_module_entry_table_exists(oop class_loader);)
public:
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
static void allocate_archived_tables();
static void iterate_symbols(MetaspaceClosure* closure);
@ -49,6 +50,7 @@ public:
static void restore_java_system_loader_from_archive(ClassLoaderData* loader_data);
static ModuleEntry* archived_boot_unnamed_module();
static ModuleEntry* archived_unnamed_module(ClassLoaderData* loader_data);
#endif // INCLUDE_CDS_JAVA_HEAP
static bool is_full_module_graph_loaded() { return _full_module_graph_loaded; }
};

Some files were not shown because too many files have changed in this diff Show More