Mirror of https://github.com/openjdk/jdk.git, synced 2026-03-16 10:53:31 +00:00

Merge
commit b871c1ff3a

.hgtags (3)
@@ -588,3 +588,6 @@ fbbe6672ae15deaf350a9e935290a36f57ba9c25 jdk-14+13
cddef3bde924f3ff4f17f3d369280cf69d0450e5 jdk-14+14
9c250a7600e12bdb1e611835250af3204d4aa152 jdk-13-ga
778fc2dcbdaa8981e07e929a2cacef979c72261e jdk-14+15
d29f0181ba424a95d881aba5eabf2e393abcc70f jdk-14+16
5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18

@@ -86,16 +86,18 @@ endif
# from there. These files were explicitly filtered or modified in <module>-copy
# targets. For the rest, just pick up everything from the source legal dirs.
LEGAL_NOTICES := \
    $(SUPPORT_OUTPUTDIR)/modules_legal/common \
    $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/common) \
    $(if $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
      $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
      $(call FindModuleLegalSrcDirs, $(MODULE)) \
    )

LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
DEPS += $(call FindFiles, $(LEGAL_NOTICES))
ifneq ($(strip $(LEGAL_NOTICES)), )
  LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
  DEPS += $(call FindFiles, $(LEGAL_NOTICES))

JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
  JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
endif

ifeq ($(filter-out jdk.incubator.%, $(MODULE)), )
  JMOD_FLAGS += --do-not-resolve-by-default

@@ -213,8 +213,10 @@ AC_DEFUN([BASIC_ABSOLUTE_PATH],
  if test "x[$]$1" != x; then
    new_path="[$]$1"

    if [ [[ "$new_path" = ~* ]] ]; then
    # Use eval to expand a potential ~
    # Use eval to expand a potential ~. This technique does not work if there
    # are spaces in the path (which is valid at this point on Windows), so only
    # try to apply it if there is an actual ~ first in the path.
    if [ [[ "$new_path" = "~"* ]] ]; then
      eval new_path="$new_path"
      if test ! -f "$new_path" && test ! -d "$new_path"; then
        AC_MSG_ERROR([The new_path of $1, which resolves as "$new_path", is not found.])

@@ -35,7 +35,7 @@ DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2020-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=58 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="12 13 14"
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="13 14"
DEFAULT_JDK_SOURCE_TARGET_VERSION=14
DEFAULT_PROMOTED_VERSION_PRE=ea

@@ -760,34 +760,6 @@ define SetupNativeCompilationBody
      endif
    endif

    # Create a rule to collect all the individual make dependency files into a
    # single makefile.
    $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d

    $$($1_DEPS_FILE): $$($1_ALL_OBJS)
	$(RM) $$@
	# CD into dir to reduce risk of hitting command length limits, which
	# could otherwise happen if TOPDIR is a very long path.
	$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
	$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
	# After generating the file, which happens after all objects have been
	# compiled, copy it to .old extension. On the next make invocation, this
	# .old file will be included by make.
	$(CP) $$@.tmp $$@.old
	$(MV) $$@.tmp $$@

    $1 += $$($1_DEPS_FILE)

    # The include must be on the .old file, which represents the state from the
    # previous invocation of make. The file being included must not have a rule
    # defined for it as otherwise make will think it has to run the rule before
    # being able to include the file, which would be wrong since we specifically
    # need the file as it was generated by a previous make invocation.
    ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
      $1_DEPS_FILE_LOADED := true
      -include $$($1_DEPS_FILE).old
    endif

    # Now call SetupCompileNativeFile for each source file we are going to compile.
    $$(foreach file, $$($1_SRCS), \
        $$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\

@@ -850,6 +822,34 @@ define SetupNativeCompilationBody
      endif
    endif

    # Create a rule to collect all the individual make dependency files into a
    # single makefile.
    $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d

    $$($1_DEPS_FILE): $$($1_ALL_OBJS) $$($1_RES)
	$(RM) $$@
	# CD into dir to reduce risk of hitting command length limits, which
	# could otherwise happen if TOPDIR is a very long path.
	$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
	$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
	# After generating the file, which happens after all objects have been
	# compiled, copy it to .old extension. On the next make invocation, this
	# .old file will be included by make.
	$(CP) $$@.tmp $$@.old
	$(MV) $$@.tmp $$@

    $1 += $$($1_DEPS_FILE)

    # The include must be on the .old file, which represents the state from the
    # previous invocation of make. The file being included must not have a rule
    # defined for it as otherwise make will think it has to run the rule before
    # being able to include the file, which would be wrong since we specifically
    # need the file as it was generated by a previous make invocation.
    ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
      $1_DEPS_FILE_LOADED := true
      -include $$($1_DEPS_FILE).old
    endif

    ifneq ($(DISABLE_MAPFILES), true)
      $1_REAL_MAPFILE := $$($1_MAPFILE)
      ifeq ($(call isTargetOs, windows), false)

@@ -365,7 +365,7 @@ var getJibProfilesCommon = function (input, data) {
        };
    };

    common.boot_jdk_version = "12";
    common.boot_jdk_version = "13";
    common.boot_jdk_build_number = "33";
    common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
        + common.boot_jdk_version

@@ -73,7 +73,7 @@ $(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
	($(CD) $(GENSRC_DIR)/META-INF/providers && \
	    p=""; \
	    impl=""; \
	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
	    for i in $$($(NAWK) '$$0=FILENAME" "$$0' * | $(SORT) -k 2 | $(SED) 's/ .*//'); do \
	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
	      if test x$$p != x$$c; then \
	        if test x$$p != x; then \

@@ -23,8 +23,6 @@
# questions.
#

WIN_VERIFY_LIB := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libverify/verify.lib

# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, lib/CoreLibraries.gmk))

@@ -110,14 +108,14 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJAVA, \
    LDFLAGS_macosx := -L$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/, \
    LDFLAGS_windows := -delayload:shell32.dll, \
    LIBS := $(BUILD_LIBFDLIBM_TARGET), \
    LIBS_unix := -ljvm -lverify, \
    LIBS_unix := -ljvm, \
    LIBS_linux := $(LIBDL), \
    LIBS_solaris := -lsocket -lnsl -lscf $(LIBDL), \
    LIBS_aix := $(LIBDL) $(LIBM),\
    LIBS_macosx := -framework CoreFoundation \
        -framework Foundation \
        -framework SystemConfiguration, \
    LIBS_windows := jvm.lib $(WIN_VERIFY_LIB) \
    LIBS_windows := jvm.lib \
        shell32.lib delayimp.lib \
        advapi32.lib version.lib, \
))

@@ -2513,17 +2513,8 @@ void Compile::reshape_address(AddPNode* addp) {
    __ INSN(REG, as_Register(BASE));                                  \
  }

typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);

  // Used for all non-volatile memory accesses. The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int size, int disp)
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

@@ -2542,16 +2533,34 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
    }

    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
      return Address(base, disp);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
      return Address(base, as_Register(index), scale);
    }
  }


typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);

  // Used for all non-volatile memory accesses. The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int size, int disp)
  {
    Address addr = mem2address(opcode, base, index, size, disp);
    (masm.*insn)(reg, addr);
  }

  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp)
  {
    Address::extend scale;

@@ -2573,8 +2582,8 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
  }

  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
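The refactoring above separates address formation from instruction emission: mem2address() decodes the operand (opcode, base, index, size, disp) into an Address in one place, and each loadStore() overload then issues its instruction through a pointer to member function, removing the duplicated case analysis. A minimal standalone C++ sketch of that dispatch pattern, with simplified stand-in types rather than HotSpot's MacroAssembler:

#include <cstdio>

// Simplified stand-ins for the HotSpot types; illustrative only.
struct Address { long base; long disp; };

struct Assembler {
  void ldr(int reg, const Address& adr) { std::printf("ldr r%d, [r%ld, #%ld]\n", reg, adr.base, adr.disp); }
  void str(int reg, const Address& adr) { std::printf("str r%d, [r%ld, #%ld]\n", reg, adr.base, adr.disp); }
};

typedef void (Assembler::*mem_insn)(int reg, const Address& adr);

// Analogue of mem2address(): the operand is decoded into an Address once.
static Address mem2address(long base, long disp) {
  return Address{base, disp};
}

// Analogue of the new loadStore(): compute the address, then dispatch.
static void loadStore(Assembler& masm, mem_insn insn, int reg, long base, long disp) {
  Address addr = mem2address(base, disp);
  (masm.*insn)(reg, addr);
}

int main() {
  Assembler masm;
  loadStore(masm, &Assembler::ldr, 0, 2, 16); // prints "ldr r0, [r2, #16]"
  loadStore(masm, &Assembler::str, 1, 2, 24); // prints "str r1, [r2, #24]"
  return 0;
}

The same helper can then serve callers that need the Address itself (as the ZGC instructs below do) without re-deriving the addressing mode.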
@@ -3791,7 +3800,7 @@ frame %{
  static const int hi[Op_RegL + 1] = { // enum name
    0,            // Op_Node
    0,            // Op_Set
    OptoReg::Bad, // Op_RegN
    OptoReg::Bad, // Op_RegN
    OptoReg::Bad, // Op_RegI
    R0_H_num,     // Op_RegP
    OptoReg::Bad, // Op_RegF
@@ -6923,7 +6932,7 @@ instruct loadRange(iRegINoSp dst, memory mem)
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}
@@ -7616,6 +7625,7 @@ instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}
@@ -8552,6 +8562,7 @@ instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoS
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);
@@ -8665,7 +8676,7 @@ instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegL

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

@@ -8796,6 +8807,7 @@ instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN ne
%}

instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
@@ -8895,7 +8907,7 @@ instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
@@ -8996,6 +9008,7 @@ instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN ne
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
@@ -9103,8 +9116,8 @@ instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN
%}

instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
@@ -9154,6 +9167,7 @@ instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
%}

instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
@@ -9197,7 +9211,7 @@ instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}

@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/align.hpp"

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -162,16 +162,12 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
  // Creation also verifies the object.
  NativeMovConstReg* method_holder
    = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
#ifndef PRODUCT
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());

  // read the value once
  volatile intptr_t data = method_holder->data();
  assert(data == 0 || data == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(data == 0 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");
#ifdef ASSERT
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
  verify_mt_safe(callee, entry, method_holder, jump);
#endif

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);

@@ -211,9 +211,14 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);

@@ -24,22 +24,23 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1

#include "gc/z/zThreadLocalData.hpp"

ZBarrierSetAssembler::ZBarrierSetAssembler() :
    _load_barrier_slow_stub(),
    _load_barrier_weak_slow_stub() {}
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -66,7 +67,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
  assert_different_registers(rscratch1, rscratch2, src.base());
  assert_different_registers(rscratch1, rscratch2, dst);

  RegSet savedRegs = RegSet::range(r0,r28) - RegSet::of(dst, rscratch1, rscratch2);
  RegSet savedRegs = RegSet::range(r0, r28) - RegSet::of(dst, rscratch1, rscratch2);

  Label done;

@@ -206,7 +207,8 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,

  // The Address offset is too large to direct load - -784. Our range is +127, -128.
  __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
      in_bytes(JavaThread::jni_environment_offset())));
                         in_bytes(JavaThread::jni_environment_offset())));

  // Load address bad mask
  __ add(tmp, jni_env, tmp);
  __ ldr(tmp, Address(tmp));
@@ -294,12 +296,12 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*
  __ prologue("zgc_load_barrier stub", false);

  // We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
  for (int i = 0; i < 32; i +=2) {
    __ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16)));
  for (int i = 0; i < 32; i += 2) {
    __ stpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ pre(sp,-16)));
  }

  RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0);
  __ push(saveRegs, sp);
  const RegSet save_regs = RegSet::range(r1, r28);
  __ push(save_regs, sp);

  // Setup arguments
  __ load_parameter(0, c_rarg0);
@@ -307,98 +309,161 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);

  __ pop(saveRegs, sp);
  __ pop(save_regs, sp);

  for (int i = 30; i >0; i -=2) {
    __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16)));
  }
  for (int i = 30; i >= 0; i -= 2) {
    __ ldpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ post(sp, 16)));
  }

  __ epilogue();
}
#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_FloatRegister()) {
    return opto_reg & ~1;
  }

  return opto_reg;
}

#undef __
#define __ cgen->assembler()->
#define __ _masm->

// Generates a register specific stub for calling
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
//
// The raddr register serves as both input and output for this stub. When the stub is
// called the raddr register contains the object field address (oop*) where the bad oop
// was loaded from, which caused the slow path to be taken. On return from the stub the
// raddr register contains the good/healed oop returned from
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
  // Don't generate stub for invalid registers
  if (raddr == zr || raddr == r29 || raddr == r30) {
    return NULL;
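For context, refine_register() above relies on each FloatRegister occupying two consecutive OptoReg names, so masking off bit 0 canonicalizes either half to the even name. A tiny illustrative C++ sketch of that rounding (the register numbers are hypothetical):

#include <cstdio>

// Each FloatRegister spans two consecutive OptoReg names; clearing bit 0
// maps either half to the canonical even name for that register.
int main() {
  for (int opto_reg = 14; opto_reg <= 17; opto_reg++) {
    std::printf("OptoReg %d -> %d\n", opto_reg, opto_reg & ~1); // 14->14, 15->14, 16->16, 17->16
  }
  return 0;
}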
class ZSaveLiveRegisters {
private:
  MacroAssembler* const _masm;
  RegSet                _gp_regs;
  RegSet                _fp_regs;

public:
  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of live registers
    RegMask live = stub->live();

    // Record registers that needs to be saved/restored
    while (live.is_NotEmpty()) {
      const OptoReg::Name opto_reg = live.find_first_elem();
      live.Remove(opto_reg);
      if (OptoReg::is_reg(opto_reg)) {
        const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
        if (vm_reg->is_Register()) {
          _gp_regs += RegSet::of(vm_reg->as_Register());
        } else if (vm_reg->is_FloatRegister()) {
          _fp_regs += RegSet::of((Register)vm_reg->as_FloatRegister());
        } else {
          fatal("Unknown register type");
        }
      }
    }

    // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
    _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
  }

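The initialize() scan above walks C2's live-register mask one element at a time, partitions it into general-purpose and floating-point save sets, and then subtracts registers that need no saving. A standalone sketch of the same partitioning over a plain bitmask; the register numbering (0-31 GP, 32-63 FP) and the GCC/Clang __builtin_ctzll call are assumptions of this sketch, not HotSpot code:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t live = (1ULL << 3) | (1ULL << 20) | (1ULL << 40); // r3, r20, v8 live
  uint64_t gp_regs = 0, fp_regs = 0;

  while (live != 0) {                 // live.is_NotEmpty()
    int reg = __builtin_ctzll(live);  // live.find_first_elem()
    live &= live - 1;                 // live.Remove(opto_reg)
    if (reg < 32) {
      gp_regs |= 1ULL << reg;         // _gp_regs += RegSet::of(...)
    } else {
      fp_regs |= 1ULL << (reg - 32);  // _fp_regs += RegSet::of(...)
    }
  }

  gp_regs &= ~(((1ULL << 12) - 1) << 19); // drop r19-r30, like the SOE subtraction above

  std::printf("gp=0x%llx fp=0x%llx\n", (unsigned long long)gp_regs, (unsigned long long)fp_regs);
  return 0; // prints gp=0x8 fp=0x100
}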
  // Create stub name
  char name[64];
  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_regs(),
      _fp_regs() {

  __ align(CodeEntryAlignment);
  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
  address start = __ pc();
    // Figure out what registers to save/restore
    initialize(stub);

  // Save live registers
  RegSet savedRegs = RegSet::range(r0,r18) - RegSet::of(raddr);

  __ enter();
  __ push(savedRegs, sp);

  // Setup arguments
  if (raddr != c_rarg1) {
    __ mov(c_rarg1, raddr);
    // Save registers
    __ push(_gp_regs, sp);
    __ push_fp(_fp_regs, sp);
  }

  __ ldr(c_rarg0, Address(raddr));
  ~ZSaveLiveRegisters() {
    // Restore registers
    __ pop_fp(_fp_regs, sp);
    __ pop(_gp_regs, sp);
  }
};

  // Call barrier function
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
#undef __
#define __ _masm->

  // Move result returned in r0 to raddr, if needed
  if (raddr != r0) {
    __ mov(raddr, r0);
class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ mov(c_rarg0, _ref);
      }
      __ mov(c_rarg1, 0);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        // _ref is already at correct place
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        // _ref is in wrong place, but not in c_rarg1, so fix it first
        __ lea(c_rarg1, _ref_addr);
        __ mov(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
        __ mov(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
        assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
        if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
          __ mov(rscratch2, c_rarg1);
          __ lea(c_rarg1, _ref_addr);
          __ mov(c_rarg0, rscratch2);
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

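The final branch of the ZSetupArguments constructor handles the one genuinely cyclic case: _ref already sits in c_rarg1 while the address that must be materialized into c_rarg1 reads c_rarg0, so a scratch register breaks the cycle. A small C++ sketch of that move sequence, with plain integers standing in for registers and arbitrary values:

#include <cstdio>

int main() {
  int c_rarg0 = 7;   // feeds the address computation (_ref_addr uses c_rarg0)
  int c_rarg1 = 42;  // currently holds _ref
  int rscratch2;

  rscratch2 = c_rarg1;     // __ mov(rscratch2, c_rarg1)  -- save _ref
  c_rarg1 = c_rarg0 + 16;  // __ lea(c_rarg1, _ref_addr)  -- consumes old c_rarg0
  c_rarg0 = rscratch2;     // __ mov(c_rarg0, rscratch2)  -- _ref lands in c_rarg0

  std::printf("c_rarg0=%d c_rarg1=%d\n", c_rarg0, c_rarg1); // c_rarg0=42 c_rarg1=23
  return 0;
}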
  __ pop(savedRegs, sp);
  __ leave();
  __ ret(lr);
  ~ZSetupArguments() {
    // Transfer result
    if (_ref != r0) {
      __ mov(_ref, r0);
    }
  }
};

  return start;
#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ mov(rscratch1, stub->slow_path());
    __ blr(rscratch1);
  }

  // Stub exit
  __ b(*stub->continuation());
}

#undef __

static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
  const int nregs = 28; // Exclude FP, XZR, SP from calculation.
  const int code_size = nregs * 254; // Rough estimate of code size

  ResourceMark rm;

  CodeBuffer buf(BufferBlob::create(label, code_size));
  StubCodeGenerator cgen(&buf);

  for (int i = 0; i < nregs; i++) {
    const Register reg = as_Register(i);
    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
  }
}

void ZBarrierSetAssembler::barrier_stubs_init() {
  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
}

address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
  return _load_barrier_slow_stub[reg->encoding()];
}

address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
  return _load_barrier_weak_slow_stub[reg->encoding()];
}
#endif // COMPILER2

@@ -24,6 +24,12 @@
#ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP

#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2

#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
@@ -32,14 +38,13 @@ class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1

#ifdef COMPILER2
class Node;
class ZLoadBarrierStubC2;
#endif // COMPILER2

class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
private:
  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];

public:
  ZBarrierSetAssembler();

  virtual void load_at(MacroAssembler* masm,
                       DecoratorSet decorators,
                       BasicType type,
@@ -83,10 +88,13 @@ public:
                                             DecoratorSet decorators) const;
#endif // COMPILER1

  virtual void barrier_stubs_init();
#ifdef COMPILER2
  OptoReg::Name refine_register(const Node* node,
                                OptoReg::Name opto_reg);

  address load_barrier_slow_stub(Register reg);
  address load_barrier_weak_slow_stub(Register reg);
  void generate_c2_load_barrier_stub(MacroAssembler* masm,
                                     ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
};

#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP

@@ -40,7 +40,7 @@
//  +--------------------------------+ 0x0000014000000000 (20TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000010000000000 (16TB)
//  |     (Reserved, but unused)     |
//  .                                .
//  +--------------------------------+ 0x00000c0000000000 (12TB)
//  |          Marked1 View          |
//  +--------------------------------+ 0x0000080000000000 (8TB)
@@ -75,7 +75,7 @@
//  +--------------------------------+ 0x0000280000000000 (40TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000200000000000 (32TB)
//  |     (Reserved, but unused)     |
//  .                                .
//  +--------------------------------+ 0x0000180000000000 (24TB)
//  |          Marked1 View          |
//  +--------------------------------+ 0x0000100000000000 (16TB)
@@ -110,7 +110,7 @@
//  +--------------------------------+ 0x0000500000000000 (80TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000400000000000 (64TB)
//  |     (Reserved, but unused)     |
//  .                                .
//  +--------------------------------+ 0x0000300000000000 (48TB)
//  |          Marked1 View          |
//  +--------------------------------+ 0x0000200000000000 (32TB)

@@ -36,7 +36,6 @@
// ------------------------------------------------------------------
//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;

@@ -24,155 +24,244 @@
source_hpp %{

#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zThreadLocalData.hpp"

%}

source %{

#include "gc/z/zBarrierSetAssembler.hpp"
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
  __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
  __ andr(tmp, tmp, ref);
  __ cbnz(tmp, *stub->entry());
  __ bind(*stub->continuation());
}

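z_load_barrier() above emits the ZGC fast-path check: AND the loaded reference with the thread-local address bad mask and branch to the stub only if any bad bit is set. A conceptual C++ sketch of that test; the mask and reference values below are made up for illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t address_bad_mask = 1ULL << 44; // rotates between GC phases in real ZGC
  const uint64_t good_ref = 0x0000040000001000ULL;
  const uint64_t bad_ref  = good_ref | address_bad_mask;

  // Mirrors: __ andr(tmp, tmp, ref); __ cbnz(tmp, *stub->entry());
  std::printf("good_ref -> %s\n", (good_ref & address_bad_mask) ? "slow path" : "fast path");
  std::printf("bad_ref  -> %s\n", (bad_ref  & address_bad_mask) ? "slow path" : "fast path");
  return 0;
}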
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
                                    Register base, int index, int scale,
                                    int disp, bool weak) {
  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);

  if (index == -1) {
    if (disp != 0) {
      __ lea(dst, Address(base, disp));
    } else {
      __ mov(dst, base);
    }
  } else {
    Register index_reg = as_Register(index);
    if (disp == 0) {
      __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
    } else {
      __ lea(dst, Address(base, disp));
      __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
    }
  }

  __ far_call(RuntimeAddress(stub));
static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
  __ b(*stub->entry());
  __ bind(*stub->continuation());
}

%}

//
// Execute ZGC load barrier (strong) slow path
//
instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
                            vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
                            vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
                            vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
                            vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
                            vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
                            vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
                            vRegD_V30 v30, vRegD_V31 v31) %{
  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(!n->as_LoadBarrierSlowReg()->is_weak());
// Load Pointer
instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong));
  effect(TEMP dst, KILL cr);

  effect(KILL cr,
         KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
         KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
         KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
         KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
         KILL v29, KILL v30, KILL v31);
  ins_cost(4 * INSN_COST);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}
  format %{ "ldr  $dst, $mem" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
                            $src$$index, $src$$scale, $src$$disp, false);
    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ ldr($dst$$Register, ref_addr);
    if (barrier_data() != ZLoadBarrierElided) {
      z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */);
    }
  %}
  ins_pipe(pipe_slow);

  ins_pipe(iload_reg_mem);
%}

//
// Execute ZGC load barrier (weak) slow path
//
instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
                                vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
                                vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
                                vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
                                vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
                                vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
                                vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
                                vRegD_V30 v30, vRegD_V31 v31) %{
  match(Set dst (LoadBarrierSlowReg src dst));
  predicate(n->as_LoadBarrierSlowReg()->is_weak());
// Load Weak Pointer
instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak));
  effect(TEMP dst, KILL cr);

  effect(KILL cr,
         KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
         KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
         KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
         KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
         KILL v29, KILL v30, KILL v31);
  ins_cost(4 * INSN_COST);

  format %{ "lea $dst, $src\n\t"
            "call #ZLoadBarrierSlowPath" %}
  format %{ "ldr  $dst, $mem" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
                            $src$$index, $src$$scale, $src$$disp, true);
    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ ldr($dst$$Register, ref_addr);
    z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */);
  %}
  ins_pipe(pipe_slow);

  ins_pipe(iload_reg_mem);
%}

// Load Pointer Volatile
instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP dst, KILL cr);

// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
// but doesn't affect output.
  ins_cost(VOLATILE_REF_COST);

  format %{ "ldar  $dst, $mem\t" %}

  ins_encode %{
    __ ldar($dst$$Register, $mem$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */);
    }
  %}

  ins_pipe(pipe_serial);
%}

instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(KILL cr, TEMP_DEF res);

instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
                               iRegP oldval, iRegP newval, iRegP keepalive,
                               rFlagsReg cr) %{
  match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, rscratch2);
    __ cset($res$$Register, Assembler::EQ);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, rscratch2);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 false /* acquire */, true /* release */, false /* weak */, rscratch2);
      __ cset($res$$Register, Assembler::EQ);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

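The encoding above follows a common ZGC CAS shape: try the cmpxchg, and if it failed while the witnessed value has bad color bits, heal the field through the slow path and retry once. A rough std::atomic analogy of that control flow; the bit chosen for the bad mask is invented, and heal() stands in for the load-barrier runtime call:

#include <atomic>
#include <cstdint>
#include <cstdio>

static const uint64_t bad_mask = 1ULL << 60;  // invented stand-in for the bad mask

static uint64_t heal(uint64_t ref) {          // stand-in for the slow-path barrier call
  return ref & ~bad_mask;
}

int main() {
  std::atomic<uint64_t> field{0x1000ULL | bad_mask}; // field holds a stale, colored oop
  uint64_t oldval = 0x1000, newval = 0x2000;

  uint64_t witness = oldval;
  bool ok = field.compare_exchange_strong(witness, newval);   // first cmpxchg
  if (!ok && (witness & bad_mask) != 0) {  // cbz not taken: witnessed value is bad
    field.store(heal(witness));            // slow path heals the field
    witness = oldval;
    ok = field.compare_exchange_strong(witness, newval);      // retry cmpxchg
  }
  std::printf("swapped=%d field=0x%llx\n", ok, (unsigned long long)field.load());
  return 0; // prints swapped=1 field=0x2000
}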
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
  effect(KILL cr, TEMP_DEF res);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, rscratch2);
    __ cset($res$$Register, Assembler::EQ);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, rscratch2);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 true /* acquire */, true /* release */, false /* weak */, rscratch2);
      __ cset($res$$Register, Assembler::EQ);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}

  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, $res$$Register);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 false /* acquire */, true /* release */, false /* weak */, $res$$Register);
      __ bind(good);
    }
  %}
  ins_pipe(pipe_slow);
%}

instruct z_compareAndSwapP(iRegINoSp res,
                           indirect mem,
                           iRegP oldval, iRegP newval, iRegP keepalive,
                           rFlagsReg cr) %{

  match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
  match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}


instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
                        iRegP keepalive) %{
  match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, $res$$Register);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 true /* acquire */, true /* release */, false /* weak */, $res$$Register);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF prev, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
    __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
    }
  %}

  ins_pipe(pipe_serial);
%}

instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
  effect(TEMP_DEF prev, KILL cr);

  ins_cost(VOLATILE_REF_COST);

  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
    }
  %}
  ins_pipe(pipe_serial);
%}

@@ -2132,6 +2132,65 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) {

  return count;
}

// Push lots of registers in the bit set supplied. Don't push sp.
// Return the number of words pushed
int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 31; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;  // Only push an even number of regs

  // Always pushing full 128 bit registers.
  if (count) {
    stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
    words_pushed += 2;
  }
  for (int i = 2; i < count; i += 2) {
    stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");
  return count;
}

int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 31; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;

  for (int i = 2; i < count; i += 2) {
    ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
    words_pushed += 2;
  }
  if (count) {
    ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0

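push_fp()/pop_fp() above first linearize the bitset into an array of register numbers, pad it with a dummy register (zr) so the count can be rounded down to an even number, and then move registers in stp/ldp pairs. A standalone C++ sketch of just that pair-accumulation step; the register names printed are illustrative:

#include <cstdio>

int main() {
  unsigned int bitset = 0b10110;  // v1, v2, v4 requested
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 31; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = 31;  // dummy pad, like zr->encoding_nocheck() above
  count &= ~1;         // only an even number of registers is transferred

  for (int i = 0; i < count; i += 2)
    std::printf("stp v%d, v%d\n", regs[i], regs[i + 1]); // pairs: (1,2), (4,31)
  return 0;
}

Padding with a harmless register means an odd request still maps onto paired stp/ldp instructions without a special single-register case.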
@@ -442,12 +442,18 @@ private:
  int push(unsigned int bitset, Register stack);
  int pop(unsigned int bitset, Register stack);

  int push_fp(unsigned int bitset, Register stack);
  int pop_fp(unsigned int bitset, Register stack);

  void mov(Register dst, Address a);

public:
  void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
  void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }

  void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
  void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }

  // Push and pop everything that might be clobbered by a native
  // runtime call except rscratch1 and rscratch2. (They are always
  // scratch, so we don't have to protect them.) Only save the lower

@@ -230,6 +230,11 @@ public:
    return *this;
  }

  RegSet &operator-=(const RegSet aSet) {
    *this = *this - aSet;
    return *this;
  }

  static RegSet of(Register r1) {
    return RegSet(r1);
  }

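The new operator-= mirrors the existing compound operators and is what lets code like ZSaveLiveRegisters above subtract callee-saved and scratch registers from a set in one statement. A minimal bitmask stand-in for RegSet showing the intended usage; everything here is a simplified sketch, not the HotSpot class:

#include <cstdint>
#include <cstdio>

struct RegSet {
  uint32_t bits;
  static RegSet range(int lo, int hi) {
    RegSet s{0};
    for (int r = lo; r <= hi; r++) s.bits |= 1u << r;
    return s;
  }
  static RegSet of(int r) { return RegSet{1u << r}; }
  RegSet operator+(RegSet o) const { return RegSet{bits | o.bits}; }
  RegSet operator-(RegSet o) const { return RegSet{bits & ~o.bits}; }
  RegSet& operator-=(RegSet o) { *this = *this - o; return *this; }
};

int main() {
  // Mirrors the usage above: start from the live registers, then drop
  // callee-saved and scratch ones in a single statement.
  RegSet gp_regs = RegSet::range(0, 28);
  gp_regs -= RegSet::range(19, 28) + RegSet::of(8);
  std::printf("gp_regs=0x%x\n", gp_regs.bits); // bits 0-7 and 9-18 remain
  return 0;
}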
@@ -27,6 +27,7 @@
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -115,16 +115,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

#ifdef ASSERT
  // read the value once
  volatile intptr_t data = method_holder->data();
  volatile address destination = jump->jump_destination();
  assert(data == 0 || data == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
#endif
  verify_mt_safe(callee, entry, method_holder, jump);

  // Update stub.
  method_holder->set_data((intptr_t)callee());

@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/debug.hpp"

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
  //}
  if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
    opr = as_long_opr(reg);
  } else if (type == T_OBJECT || type == T_ARRAY) {
  } else if (is_reference_type(type)) {
    opr = as_oop_opr(reg);
  } else {
    opr = as_opr(reg);

@@ -1237,7 +1237,7 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register());
  }
}
@@ -1253,7 +1253,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
  bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
                      CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
@@ -1473,7 +1473,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
    }
  } else {
    assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
    if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
    if (is_reference_type(opr1->type())) {
      // There are only equal/notequal comparisons on objects.
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
      __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
@@ -2315,8 +2315,8 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
      (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
      (!UseFastNewTypeArray && (!is_reference_type(op->type())))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -648,7 +648,7 @@ LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_
    __ membar_release();
  }

  if (type == T_OBJECT || type == T_ARRAY) {
  if (is_reference_type(type)) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);

@@ -82,9 +82,9 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, IdealizeClearArrayNode, true);

define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(uintx, ReservedCodeCacheSize, 256*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 125*M);
define_pd_global(uintx, ProfiledCodeHeapSize, 126*M);
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -178,15 +178,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
|
||||
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
|
||||
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||
|
||||
#ifdef ASSERT
|
||||
// read the value once
|
||||
volatile intptr_t data = method_holder->data();
|
||||
volatile address destination = jump->jump_destination();
|
||||
assert(data == 0 || data == (intptr_t)callee(),
|
||||
"a) MT-unsafe modification of inline cache");
|
||||
assert(destination == (address)-1 || destination == entry,
|
||||
"b) MT-unsafe modification of inline cache");
|
||||
#endif
|
||||
verify_mt_safe(callee, entry, method_holder, jump);
|
||||
|
||||
// Update stub.
|
||||
method_holder->set_data((intptr_t)callee());
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2019, SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -305,7 +305,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
|
||||
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register dst,
|
||||
Register tmp1, Register tmp2, bool needs_frame, Label *L_handle_null) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool on_oop = is_reference_type(type);
|
||||
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
|
||||
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
bool on_reference = on_weak || on_phantom;
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2019, SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -59,7 +59,7 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat
|
||||
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register val,
|
||||
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
|
||||
if (type == T_OBJECT || type == T_ARRAY) {
|
||||
if (is_reference_type(type)) {
|
||||
oop_store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
|
||||
} else {
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2019, SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1142,7 +1142,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
|
||||
}
|
||||
if (!r_2->is_valid()) {
|
||||
// Not sure we need to do this but it shouldn't hurt.
|
||||
if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
|
||||
if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
|
||||
__ ld(r, ld_offset, ld_ptr);
|
||||
ld_offset-=wordSize;
|
||||
} else {
|
||||
@ -1739,8 +1739,7 @@ static void verify_oop_args(MacroAssembler* masm,
|
||||
Register temp_reg = R19_method; // not part of any compiled calling seq
|
||||
if (VerifyOops) {
|
||||
for (int i = 0; i < method->size_of_parameters(); i++) {
|
||||
if (sig_bt[i] == T_OBJECT ||
|
||||
sig_bt[i] == T_ARRAY) {
|
||||
if (is_reference_type(sig_bt[i])) {
|
||||
VMReg r = regs[i].first();
|
||||
assert(r->is_valid(), "bad oop arg");
|
||||
if (r->is_stack()) {
|
||||
@ -2602,7 +2601,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Unbox oop result, e.g. JNIHandles::resolve value.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
|
||||
if (is_reference_type(ret_type)) {
|
||||
__ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false);
|
||||
}
|
||||
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "oops/constMethod.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2019, SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -46,7 +46,7 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
|
||||
Register reg = r_1->as_Register();
|
||||
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
|
||||
opr = as_long_opr(reg);
|
||||
} else if (type == T_OBJECT || type == T_ARRAY) {
|
||||
} else if (is_reference_type(type)) {
|
||||
opr = as_oop_opr(reg);
|
||||
} else if (type == T_METADATA) {
|
||||
opr = as_metadata_opr(reg);
|
||||
|
||||
@ -972,6 +972,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P
|
||||
} else {
|
||||
__ z_lg(dest->as_register(), disp_value, disp_reg, src);
|
||||
}
|
||||
__ verify_oop(dest->as_register());
|
||||
break;
|
||||
}
|
||||
case T_FLOAT:
|
||||
@ -991,9 +992,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P
|
||||
case T_LONG : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(dest->as_register());
|
||||
}
|
||||
|
||||
if (patch != NULL) {
|
||||
patching_epilog(patch, patch_code, src, info);
|
||||
@ -1006,7 +1004,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
|
||||
assert(dest->is_register(), "should not call otherwise");
|
||||
|
||||
if (dest->is_single_cpu()) {
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
if (is_reference_type(type)) {
|
||||
__ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
|
||||
__ verify_oop(dest->as_register());
|
||||
} else if (type == T_METADATA) {
|
||||
@ -1034,7 +1032,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
|
||||
|
||||
if (src->is_single_cpu()) {
|
||||
const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
|
||||
if (type == T_OBJECT || type == T_ARRAY) {
|
||||
if (is_reference_type(type)) {
|
||||
__ verify_oop(src->as_register());
|
||||
__ reg2mem_opt(src->as_register(), dst, true);
|
||||
} else if (type == T_METADATA) {
|
||||
@ -1080,7 +1078,7 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
|
||||
if (is_reference_type(to_reg->type())) {
|
||||
__ verify_oop(to_reg->as_register());
|
||||
}
|
||||
}
|
||||
@ -1131,7 +1129,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
|
||||
|
||||
assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
|
||||
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
if (is_reference_type(type)) {
|
||||
__ verify_oop(from->as_register());
|
||||
}
|
||||
|
||||
@ -1294,10 +1292,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
|
||||
Register reg1 = opr1->as_register();
|
||||
if (opr2->is_single_cpu()) {
|
||||
// cpu register - cpu register
|
||||
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
|
||||
if (is_reference_type(opr1->type())) {
|
||||
__ z_clgr(reg1, opr2->as_register());
|
||||
} else {
|
||||
assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
|
||||
assert(!is_reference_type(opr2->type()), "cmp int, oop?");
|
||||
if (unsigned_comp) {
|
||||
__ z_clr(reg1, opr2->as_register());
|
||||
} else {
|
||||
@ -1306,7 +1304,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
|
||||
}
|
||||
} else if (opr2->is_stack()) {
|
||||
// cpu register - stack
|
||||
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
|
||||
if (is_reference_type(opr1->type())) {
|
||||
__ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
|
||||
} else {
|
||||
if (unsigned_comp) {
|
||||
@ -1324,7 +1322,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
|
||||
} else {
|
||||
__ z_cfi(reg1, c->as_jint());
|
||||
}
|
||||
} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
|
||||
} else if (is_reference_type(c->type())) {
|
||||
// In 64bit oops are single register.
|
||||
jobject o = c->as_jobject();
|
||||
if (o == NULL) {
|
||||
@ -1767,7 +1765,7 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
|
||||
}
|
||||
} else {
|
||||
Register r_lo;
|
||||
if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
|
||||
if (is_reference_type(right->type())) {
|
||||
r_lo = right->as_register();
|
||||
} else {
|
||||
r_lo = right->as_register_lo();
|
||||
@ -2413,8 +2411,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
|
||||
__ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend
|
||||
|
||||
if (UseSlowPath ||
|
||||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
|
||||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
|
||||
(!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
|
||||
(!UseFastNewTypeArray && (!is_reference_type(op->type())))) {
|
||||
__ z_brul(*op->stub()->entry());
|
||||
} else {
|
||||
__ allocate_array(op->obj()->as_register(),
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -104,19 +104,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
|
||||
// Creation also verifies the object.
|
||||
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
|
||||
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
|
||||
|
||||
#ifdef ASSERT
|
||||
// A generated lambda form might be deleted from the Lambdaform
|
||||
// cache in MethodTypeForm. If a jit compiled lambdaform method
|
||||
// becomes not entrant and the cache access returns null, the new
|
||||
// resolve will lead to a new generated LambdaForm.
|
||||
volatile intptr_t data = method_holder->data();
|
||||
volatile address destination = jump->jump_destination();
|
||||
assert(data == 0 || data == (intptr_t)callee() || callee->is_compiled_lambda_form(),
|
||||
"a) MT-unsafe modification of inline cache");
|
||||
assert(destination == (address)-1 || destination == entry,
|
||||
"b) MT-unsafe modification of inline cache");
|
||||
#endif
|
||||
verify_mt_safe(callee, entry, method_holder, jump);
|
||||
|
||||
// Update stub.
|
||||
method_holder->set_data((intptr_t)callee(), relocInfo::metadata_type);
|
||||
|
||||
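Both the ppc and s390 hunks above (and the x86 ones below) replace the hand-rolled `#ifdef ASSERT` blocks with a call to a shared `verify_mt_safe()` helper. A hedged sketch of what that helper wraps, with the signature inferred from the call sites in this diff; its actual definition lives in shared code outside this diff:

// Sketch only: signature taken from the call sites above, body assumed to
// mirror the removed assert block. The real helper is defined in shared code.
static void verify_mt_safe(const methodHandle& callee, address entry,
                           NativeMovConstReg* method_holder, NativeJump* jump) {
#ifdef ASSERT
  intptr_t data = method_holder->data();
  address destination = jump->jump_destination();
  assert(data == 0 || data == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
#endif
}
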
@ -1,6 +1,6 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP SE. All rights reserved.
 * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -100,7 +100,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP SE. All rights reserved.
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -36,14 +36,14 @@ void ModRefBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler*

void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                   Register src, Register dst, Register count) {
  if (type == T_OBJECT || type == T_ARRAY) {
  if (is_reference_type(type)) {
    gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
  }
}

void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                   Register dst, Register count, bool do_return) {
  if (type == T_OBJECT || type == T_ARRAY) {
  if (is_reference_type(type)) {
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, do_return);
  } else {
    if (do_return) { __ z_br(Z_R14); }
@ -52,7 +52,7 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat

void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  if (type == T_OBJECT || type == T_ARRAY) {
  if (is_reference_type(type)) {
    oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -883,7 +883,7 @@ static void verify_oop_args(MacroAssembler *masm,
  if (!VerifyOops) { return; }

  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
    if (is_reference_type(sig_bt[i])) {
      VMReg r = regs[i].first();
      assert(r->is_valid(), "bad oop arg");

@ -2318,7 +2318,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  __ reset_last_Java_frame();

  // Unpack oop result, e.g. JNIHandles::resolve result.
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
  if (is_reference_type(ret_type)) {
    __ resolve_jobject(Z_RET, /* tmp1 */ Z_R13, /* tmp2 */ Z_R7);
  }

@ -2621,7 +2621,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
      } else {
        if (!r_2->is_valid()) {
          // Not sure we need to do this but it shouldn't hurt.
          if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
            __ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
          } else {
            __ z_l(r_1->as_Register(), ld_offset, ld_ptr);

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -104,16 +104,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

#ifdef ASSERT
  // read the value once
  volatile intptr_t data = method_holder->data();
  volatile address destination = jump->jump_destination();
  assert(data == 0 || data == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
#endif
  verify_mt_safe(callee, entry, method_holder, jump);

  // Update stub.
  method_holder->set_data((intptr_t)callee());

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/frame.inline.hpp"



@ -4742,6 +4742,25 @@ void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}

void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)rmode);
}

void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_operand(dst, src);
  emit_int8((unsigned char)rmode);
}

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@ -5539,6 +5558,49 @@ void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector
  emit_operand(dst, src);
}

void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)(rmode));
}

void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x09);
  emit_operand(dst, src);
  emit_int8((unsigned char)(rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x09);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)(rmode));
}

void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x09);
  emit_operand(dst, src);
  emit_int8((unsigned char)(rmode));
}


void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);

@ -1856,6 +1856,9 @@ private:
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  void roundsd(XMMRegister dst, Address src, int32_t rmode);
  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);
@ -2020,6 +2023,12 @@ private:
  void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtps(XMMRegister dst, Address src, int vector_len);

  // Round Packed Double precision value.
  void vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
  void vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len);
  void vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
  void vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);

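The new `roundsd`/`vroundpd`/`vrndscalepd` emitters all take an `rmode` immediate. A hedged usage sketch, assuming the standard SSE4.1/AVX rounding-control encoding (0 = round to nearest even, 1 = round down, 2 = round up, 3 = truncate toward zero); the call below is illustrative, not from this diff:

// Hedged usage sketch: emit a truncating scalar round and a packed
// round-down, e.g. as building blocks for floor/ceil-style intrinsics.
__ roundsd(xmm0, xmm1, 3);                            // xmm0 = trunc(xmm1)
__ vroundpd(xmm2, xmm3, 1, Assembler::AVX_256bit);    // packed round-down
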
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -157,16 +157,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

#ifdef ASSERT
  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
  address destination = jump->jump_destination();
  assert(old_method == NULL || old_method == callee() ||
         !old_method->method_holder()->is_loader_alive(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
#endif
  verify_mt_safe(callee, entry, method_holder, jump);

  // Update stub.
  method_holder->set_data((intptr_t)callee());

@ -271,9 +271,14 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = LP64_ONLY(rscratch1) NOT_LP64(rdx);
    if (tmp == dst) {
      tmp = LP64_ONLY(rscratch2) NOT_LP64(rcx);
    }
    __ push(tmp);
  }

  assert_different_registers(dst, tmp);

  Label done;
  __ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ notptr(tmp);

@ -23,20 +23,7 @@

#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/debug.hpp"

void ZArguments::initialize_platform() {
#ifdef COMPILER2
  // The C2 barrier slow path expects vector registers to be at least
  // 16 bytes wide, which is the minimum width available on all
  // x86-64 systems. However, the user could have specified a lower
  // number on the command-line, in which case we print a warning
  // and raise it to 16.
  if (MaxVectorSize < 16) {
    warning("ZGC requires MaxVectorSize to be at least 16");
    FLAG_SET_DEFAULT(MaxVectorSize, 16);
  }
#endif
  // Does nothing
}

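The clamp above can go away because, with the new C2 barriers later in this commit, the slow path no longer pushes fixed 16-byte XMM slots; each live vector register is spilled at its recorded width. A hedged sketch of the spill step the new code uses (names taken from the `ZSaveLiveRegisters` class below):

// Sketch, echoing ZSaveLiveRegisters::xmm_register_save() below: the spill
// width comes from the register's live size (8..64 bytes), not a fixed 16.
_spill_offset -= reg_data._size;
vec_spill_helper(__ code(), false /* do_size */, false /* is_load */,
                 _spill_offset, opto_reg, xmm_ideal_reg_for_size(reg_data._size), tty);
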
@ -24,22 +24,22 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1

ZBarrierSetAssembler::ZBarrierSetAssembler() :
    _load_barrier_slow_stub(),
    _load_barrier_weak_slow_stub() {}
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@ -344,137 +344,327 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*

#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_XMMRegister()) {
    opto_reg &= ~15;
    switch (node->ideal_reg()) {
      case Op_VecX:
        opto_reg |= 2;
        break;
      case Op_VecY:
        opto_reg |= 4;
        break;
      case Op_VecZ:
        opto_reg |= 8;
        break;
      default:
        opto_reg |= 1;
        break;
    }
  }

  return opto_reg;
}

// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
                            int stack_offset, int reg, uint ireg, outputStream* st);

#undef __
#define __ cgen->assembler()->
#define __ _masm->

// Generates a register specific stub for calling
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
//
// The raddr register serves as both input and output for this stub. When the stub is
// called the raddr register contains the object field address (oop*) where the bad oop
// was loaded from, which caused the slow path to be taken. On return from the stub the
// raddr register contains the good/healed oop returned from
// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
  // Don't generate stub for invalid registers
  if (raddr == rsp || raddr == r15) {
    return NULL;
class ZSaveLiveRegisters {
private:
  struct XMMRegisterData {
    XMMRegister _reg;
    int         _size;

    // Used by GrowableArray::find()
    bool operator == (const XMMRegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const          _masm;
  GrowableArray<Register>        _gp_registers;
  GrowableArray<XMMRegisterData> _xmm_registers;
  int                            _spill_size;
  int                            _spill_offset;

  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
    if (left->_size == right->_size) {
      return 0;
    }

    return (left->_size < right->_size) ? -1 : 1;
  }

  // Create stub name
  char name[64];
  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());

  __ align(CodeEntryAlignment);
  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
  address start = __ pc();

  // Save live registers
  if (raddr != rax) {
    __ push(rax);
  }
  if (raddr != rcx) {
    __ push(rcx);
  }
  if (raddr != rdx) {
    __ push(rdx);
  }
  if (raddr != rsi) {
    __ push(rsi);
  }
  if (raddr != rdi) {
    __ push(rdi);
  }
  if (raddr != r8) {
    __ push(r8);
  }
  if (raddr != r9) {
    __ push(r9);
  }
  if (raddr != r10) {
    __ push(r10);
  }
  if (raddr != r11) {
    __ push(r11);
  static int xmm_slot_size(OptoReg::Name opto_reg) {
    // The low order 4 bits denote what size of the XMM register is live
    return (opto_reg & 15) << 3;
  }

  // Setup arguments
  if (raddr != c_rarg1) {
    __ movq(c_rarg1, raddr);
  }
  __ movq(c_rarg0, Address(raddr, 0));

  // Call barrier function
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Move result returned in rax to raddr, if needed
  if (raddr != rax) {
    __ movq(raddr, rax);
  static uint xmm_ideal_reg_for_size(int reg_size) {
    switch (reg_size) {
      case 8:
        return Op_VecD;
      case 16:
        return Op_VecX;
      case 32:
        return Op_VecY;
      case 64:
        return Op_VecZ;
      default:
        fatal("Invalid register size %d", reg_size);
        return 0;
    }
  }

  // Restore saved registers
  if (raddr != r11) {
    __ pop(r11);
  }
  if (raddr != r10) {
    __ pop(r10);
  }
  if (raddr != r9) {
    __ pop(r9);
  }
  if (raddr != r8) {
    __ pop(r8);
  }
  if (raddr != rdi) {
    __ pop(rdi);
  }
  if (raddr != rsi) {
    __ pop(rsi);
  }
  if (raddr != rdx) {
    __ pop(rdx);
  }
  if (raddr != rcx) {
    __ pop(rcx);
  }
  if (raddr != rax) {
    __ pop(rax);
  bool xmm_needs_vzeroupper() const {
    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
  }

  __ ret(0);
  void xmm_register_save(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    _spill_offset -= reg_data._size;
    vec_spill_helper(__ code(), false /* do_size */, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
  }

  return start;
  void xmm_register_restore(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    vec_spill_helper(__ code(), false /* do_size */, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
    _spill_offset += reg_data._size;
  }

  void gp_register_save(Register reg) {
    _spill_offset -= 8;
    __ movq(Address(rsp, _spill_offset), reg);
  }

  void gp_register_restore(Register reg) {
    __ movq(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of caller saved registers that need to
    // be saved/restored if live
    RegMask caller_saved;
    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));

    // Create mask of live registers
    RegMask live = stub->live();
    if (stub->tmp() != noreg) {
      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
    }

    int gp_spill_size = 0;
    int xmm_spill_size = 0;

    // Record registers that need to be saved/restored
    while (live.is_NotEmpty()) {
      const OptoReg::Name opto_reg = live.find_first_elem();
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

      live.Remove(opto_reg);

      if (vm_reg->is_Register()) {
        if (caller_saved.Member(opto_reg)) {
          _gp_registers.append(vm_reg->as_Register());
          gp_spill_size += 8;
        }
      } else if (vm_reg->is_XMMRegister()) {
        // We encode in the low order 4 bits of the opto_reg, how large part of the register is live
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
        const int reg_size = xmm_slot_size(opto_reg);
        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
        const int reg_index = _xmm_registers.find(reg_data);
        if (reg_index == -1) {
          // Not previously appended
          _xmm_registers.append(reg_data);
          xmm_spill_size += reg_size;
        } else {
          // Previously appended, update size
          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
          if (reg_size > reg_size_prev) {
            _xmm_registers.at_put(reg_index, reg_data);
            xmm_spill_size += reg_size - reg_size_prev;
          }
        }
      } else {
        fatal("Unexpected register type");
      }
    }

    // Sort by size, largest first
    _xmm_registers.sort(xmm_compare_register_size);

    // Stack pointer must be 16 bytes aligned for the call
    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
  }

public:
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_registers(),
      _xmm_registers(),
      _spill_size(0),
      _spill_offset(0) {

    //
    // Stack layout after registers have been spilled:
    //
    // | ...            | original rsp, 16 bytes aligned
    // ------------------
    // | zmm0 high      |
    // | ...            |
    // | zmm0 low       | 16 bytes aligned
    // | ...            |
    // | ymm1 high      |
    // | ...            |
    // | ymm1 low       | 16 bytes aligned
    // | ...            |
    // | xmmN high      |
    // | ...            |
    // | xmmN low       | 8 bytes aligned
    // | reg0           | 8 bytes aligned
    // | reg1           |
    // | ...            |
    // | regN           | new rsp, if 16 bytes aligned
    // | <padding>      | else new rsp, 16 bytes aligned
    // ------------------
    //

    // Figure out what registers to save/restore
    initialize(stub);

    // Allocate stack space
    if (_spill_size > 0) {
      __ subptr(rsp, _spill_size);
    }

    // Save XMM/YMM/ZMM registers
    for (int i = 0; i < _xmm_registers.length(); i++) {
      xmm_register_save(_xmm_registers.at(i));
    }

    if (xmm_needs_vzeroupper()) {
      __ vzeroupper();
    }

    // Save general purpose registers
    for (int i = 0; i < _gp_registers.length(); i++) {
      gp_register_save(_gp_registers.at(i));
    }
  }

  ~ZSaveLiveRegisters() {
    // Restore general purpose registers
    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
      gp_register_restore(_gp_registers.at(i));
    }

    __ vzeroupper();

    // Restore XMM/YMM/ZMM registers
    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
      xmm_register_restore(_xmm_registers.at(i));
    }

    // Free stack space
    if (_spill_size > 0) {
      __ addptr(rsp, _spill_size);
    }
  }
};

class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ movq(c_rarg0, _ref);
      }
      __ xorq(c_rarg1, c_rarg1);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        __ lea(c_rarg1, _ref_addr);
        __ movq(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        __ movq(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
        __ xchgq(c_rarg0, c_rarg1);
        if (_ref_addr.base() == c_rarg0) {
          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
        } else if (_ref_addr.index() == c_rarg0) {
          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != rax) {
      __ movq(_ref, rax);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ call(RuntimeAddress(stub->slow_path()));
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __

static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
  const int nregs = RegisterImpl::number_of_registers;
  const int code_size = nregs * 128; // Rough estimate of code size

  ResourceMark rm;

  CodeBuffer buf(BufferBlob::create(label, code_size));
  StubCodeGenerator cgen(&buf);

  for (int i = 0; i < nregs; i++) {
    const Register reg = as_Register(i);
    stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
  }
}

void ZBarrierSetAssembler::barrier_stubs_init() {
  barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
  barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
}

address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
  return _load_barrier_slow_stub[reg->encoding()];
}

address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
  return _load_barrier_weak_slow_stub[reg->encoding()];
}
#endif // COMPILER2

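The `refine_register()`/`xmm_slot_size()` pair above round-trips the live width of a vector register through the low four bits of its `OptoReg` name. A small worked example of that encoding, using only values taken from the code above:

// Worked example of the low-bit encoding used above:
//   refine_register() tags an XMM OptoReg with its live width in 8-byte slots:
//     Op_VecX -> 2, Op_VecY -> 4, Op_VecZ -> 8, anything else -> 1
//   xmm_slot_size() recovers the byte size via (opto_reg & 15) << 3:
//     1 << 3 = 8, 2 << 3 = 16, 4 << 3 = 32, 8 << 3 = 64
//   and xmm_ideal_reg_for_size() maps it back: 8 -> Op_VecD ... 64 -> Op_VecZ.
const OptoReg::Name tagged = (opto_reg & ~15) | 2;  // e.g. an XMM reg live as VecX
assert(xmm_slot_size(tagged) == 16, "16 live bytes for a 128-bit value");
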
@ -24,6 +24,14 @@
|
||||
#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
|
||||
#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
class MacroAssembler;
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Assembler;
|
||||
class LIR_OprDesc;
|
||||
@ -32,14 +40,13 @@ class StubAssembler;
|
||||
class ZLoadBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class Node;
|
||||
class ZLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
|
||||
private:
|
||||
address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
|
||||
address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
|
||||
|
||||
public:
|
||||
ZBarrierSetAssembler();
|
||||
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
@ -82,10 +89,13 @@ public:
|
||||
DecoratorSet decorators) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
virtual void barrier_stubs_init();
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node,
|
||||
OptoReg::Name opto_reg);
|
||||
|
||||
address load_barrier_slow_stub(Register reg);
|
||||
address load_barrier_weak_slow_stub(Register reg);
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
ZLoadBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
};
|
||||
|
||||
#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
|
||||
|
||||
@ -40,7 +40,7 @@
|
||||
// +--------------------------------+ 0x0000014000000000 (20TB)
|
||||
// | Remapped View |
|
||||
// +--------------------------------+ 0x0000010000000000 (16TB)
|
||||
// | (Reserved, but unused) |
|
||||
// . .
|
||||
// +--------------------------------+ 0x00000c0000000000 (12TB)
|
||||
// | Marked1 View |
|
||||
// +--------------------------------+ 0x0000080000000000 (8TB)
|
||||
@ -75,7 +75,7 @@
|
||||
// +--------------------------------+ 0x0000280000000000 (40TB)
|
||||
// | Remapped View |
|
||||
// +--------------------------------+ 0x0000200000000000 (32TB)
|
||||
// | (Reserved, but unused) |
|
||||
// . .
|
||||
// +--------------------------------+ 0x0000180000000000 (24TB)
|
||||
// | Marked1 View |
|
||||
// +--------------------------------+ 0x0000100000000000 (16TB)
|
||||
@ -110,7 +110,7 @@
|
||||
// +--------------------------------+ 0x0000500000000000 (80TB)
|
||||
// | Remapped View |
|
||||
// +--------------------------------+ 0x0000400000000000 (64TB)
|
||||
// | (Reserved, but unused) |
|
||||
// . .
|
||||
// +--------------------------------+ 0x0000300000000000 (48TB)
|
||||
// | Marked1 View |
|
||||
// +--------------------------------+ 0x0000200000000000 (32TB)
|
||||
|
||||
@ -36,7 +36,6 @@
|
||||
// ------------------------------------------------------------------
|
||||
//
|
||||
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
|
||||
const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
|
||||
const size_t ZPlatformNMethodDisarmedOffset = 4;
|
||||
const size_t ZPlatformCacheLineSize = 64;
|
||||
|
||||
|
||||
@ -24,190 +24,144 @@
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/z/c2/zBarrierSetC2.hpp"
|
||||
#include "gc/z/zThreadLocalData.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
|
||||
__ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::notZero, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
|
||||
assert(dst != rsp, "Invalid register");
|
||||
assert(dst != r15, "Invalid register");
|
||||
|
||||
const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
|
||||
: ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
|
||||
__ lea(dst, src);
|
||||
__ call(RuntimeAddress(stub));
|
||||
static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
|
||||
__ jmp(*stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
// Load Pointer
|
||||
instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(KILL cr, TEMP dst);
|
||||
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
ins_cost(125);
|
||||
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
format %{ "movq $dst, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
|
||||
__ movptr($dst$$Register, $mem$$Address);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */);
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
|
||||
ins_pipe(ialu_reg_mem);
|
||||
%}
|
||||
|
||||
// For ZMM enabled processors
|
||||
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
|
||||
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
|
||||
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
|
||||
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
// Load Weak Pointer
|
||||
instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(KILL cr, TEMP dst);
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
ins_cost(125);
|
||||
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15,
|
||||
KILL x16, KILL x17, KILL x18, KILL x19,
|
||||
KILL x20, KILL x21, KILL x22, KILL x23,
|
||||
KILL x24, KILL x25, KILL x26, KILL x27,
|
||||
KILL x28, KILL x29, KILL x30, KILL x31);
|
||||
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
format %{ "movq $dst, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
|
||||
__ movptr($dst$$Register, $mem$$Address);
|
||||
z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
|
||||
ins_pipe(ialu_reg_mem);
|
||||
%}
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
|
||||
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
match(Set dst (LoadBarrierSlowReg src dst));
|
||||
predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
|
||||
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP tmp);
|
||||
|
||||
effect(KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
KILL x4, KILL x5, KILL x6, KILL x7,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
|
||||
format %{ "lea $dst, $src\n\t"
|
||||
"call #ZLoadBarrierSlowPath" %}
|
||||
format %{ "lock\n\t"
|
||||
"cmpxchgq $newval, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
__ movptr($tmp$$Register, $oldval$$Register);
|
||||
}
|
||||
__ lock();
|
||||
__ cmpxchgptr($newval$$Register, $mem$$Address);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
__ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::zero, good);
z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
__ movptr($oldval$$Register, $tmp$$Register);
__ lock();
__ cmpxchgptr($newval$$Register, $mem$$Address);
__ bind(good);
}
%}
ins_pipe(pipe_slow);

ins_pipe(pipe_cmpxchg);
%}

// For ZMM enabled processors
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
effect(KILL cr, KILL oldval, TEMP tmp);

match(Set dst (LoadBarrierSlowReg src dst));
predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());

effect(KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15,
KILL x16, KILL x17, KILL x18, KILL x19,
KILL x20, KILL x21, KILL x22, KILL x23,
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);

format %{ "lea $dst, $src\n\t"
"call #ZLoadBarrierSlowPath" %}
format %{ "lock\n\t"
"cmpxchgq $newval, $mem\n\t"
"sete $res\n\t"
"movzbl $res, $res" %}

ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
if (barrier_data() != ZLoadBarrierElided) {
__ movptr($tmp$$Register, $oldval$$Register);
}
__ lock();
__ cmpxchgptr($newval$$Register, $mem$$Address);
if (barrier_data() != ZLoadBarrierElided) {
Label good;
__ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::zero, good);
z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
__ movptr($oldval$$Register, $tmp$$Register);
__ lock();
__ cmpxchgptr($newval$$Register, $mem$$Address);
__ bind(good);
__ cmpptr($tmp$$Register, $oldval$$Register);
}
__ setb(Assembler::equal, $res$$Register);
__ movzbl($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);

ins_pipe(pipe_cmpxchg);
%}

// Specialized versions of compareAndExchangeP that add a keepalive that is consumed
// but doesn't affect output.
instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{
match(Set newval (GetAndSetP mem newval));
predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
effect(KILL cr);

instruct z_compareAndExchangeP(
memory mem_ptr,
rax_RegP oldval, rRegP newval, rRegP keepalive,
rFlagsReg cr) %{
predicate(VM_Version::supports_cx8());
match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
effect(KILL cr);
format %{ "xchgq $newval, $mem" %}

format %{ "cmpxchgq $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}

instruct z_compareAndSwapP(rRegI res,
memory mem_ptr,
rax_RegP oldval, rRegP newval, rRegP keepalive,
rFlagsReg cr) %{
predicate(VM_Version::supports_cx8());
match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
effect(KILL cr, KILL oldval);

format %{ "cmpxchgq $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
"sete $res\n\t"
"movzbl $res, $res" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr),
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
REX_reg_breg(res, res), // movzbl
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
ins_pipe( pipe_cmpxchg );
%}

instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
format %{ "XCHGQ $newval,[$mem]" %}
ins_encode %{
__ xchgq($newval$$Register, $mem$$Address);
__ xchgptr($newval$$Register, $mem$$Address);
if (barrier_data() != ZLoadBarrierElided) {
z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */);
}
%}
ins_pipe( pipe_cmpxchg );

ins_pipe(pipe_cmpxchg);
%}
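The zCompareAndSwapP encoding above is easier to follow outside of the matcher DSL. The sketch below transcribes its control flow into plain C++ on std::atomic: if the CAS fails only because the stored pointer carries a "bad" color bit, heal it and retry once. The bad-mask bit, heal() helper, and function names are hypothetical stand-ins for ZGC internals, not HotSpot APIs.

```cpp
#include <atomic>
#include <cstdint>

// Assumed color bit; ZGC's real bad mask is per-thread and changes per phase.
static constexpr uintptr_t kBadMask = uintptr_t(1) << 63;

// Stand-in for z_load_barrier_slow_path(): returns the healed pointer.
static uintptr_t heal(uintptr_t ptr) { return ptr & ~kBadMask; }

// Returns true if the swap succeeded, mirroring the sete/movzbl epilogue.
bool cas_with_barrier(std::atomic<uintptr_t>& mem,
                      uintptr_t expected, uintptr_t newval) {
  uintptr_t old = expected;                 // saved copy, like $tmp above
  if (mem.compare_exchange_strong(old, newval)) {
    return true;                            // fast path: no barrier work needed
  }
  if ((old & kBadMask) == 0) {
    return false;                           // genuine value mismatch
  }
  uintptr_t healed = heal(old);             // slow path healed the location
  if (healed != expected) {
    return false;                           // still a mismatch after healing
  }
  return mem.compare_exchange_strong(healed, newval);  // retry the CAS once
}
```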
@ -211,5 +211,15 @@ define_pd_global(bool, ThreadLocalHandshakes, false);
"Use BMI2 instructions") \
\
diagnostic(bool, UseLibmIntrinsic, true, \
"Use Libm Intrinsics")
"Use Libm Intrinsics") \
\
/* Minimum array size in bytes to use AVX512 intrinsics */ \
/* for copy, inflate and fill which don't bail out early based on any */ \
/* condition. When this value is set to zero compare operations like */ \
/* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
diagnostic(int, AVX3Threshold, 4096, \
"Minimum array size in bytes to use AVX512 intrinsics" \
"for copy, inflate and fill. When this value is set to zero" \
"compare operations can also use AVX512 intrinsics.") \
range(0, max_jint)

#endif // CPU_X86_GLOBALS_X86_HPP
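The new AVX3Threshold flag gates which vector width the stubs pick for a given array size. A minimal sketch of that dispatch pattern, assuming a hypothetical choose_fill_kernel() helper (the real dispatch is the cmpl/jccb sequence in the assembler further down):

```cpp
#include <cstddef>

static int AVX3Threshold = 4096;  // default from the flag above

enum class FillKernel { AVX512, AVX2 };

// Mirrors "cmpl(count, AVX3Threshold); jccb(Assembler::below, ...)":
// small fills stay on 32-byte AVX2 stores, large ones move to 64-byte
// AVX-512 stores, avoiding frequency penalties on short operations.
FillKernel choose_fill_kernel(size_t count) {
  return count >= static_cast<size_t>(AVX3Threshold) ? FillKernel::AVX512
                                                     : FillKernel::AVX2;
}
```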
@ -824,11 +824,13 @@ static void pass_arg3(MacroAssembler* masm, Register arg) {
}

void MacroAssembler::stop(const char* msg) {
address rip = pc();
pusha(); // get regs on stack
if (ShowMessageBoxOnError) {
address rip = pc();
pusha(); // get regs on stack
lea(c_rarg1, InternalAddress(rip));
movq(c_rarg2, rsp); // pass pointer to regs array
}
lea(c_rarg0, ExternalAddress((address) msg));
lea(c_rarg1, InternalAddress(rip));
movq(c_rarg2, rsp); // pass pointer to regs array
andq(rsp, -16); // align stack as required by ABI
call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
hlt();
@ -3661,6 +3663,15 @@ void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
}
}

void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg) {
if (reachable(src)) {
Assembler::roundsd(dst, as_Address(src), rmode);
} else {
lea(scratch_reg, src);
Assembler::roundsd(dst, Address(scratch_reg, 0), rmode);
}
}

void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::subss(dst, as_Address(src));
@ -6584,7 +6595,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
bind(COMPARE_WIDE_VECTORS_LOOP);

#ifdef _LP64
if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
cmpl(cnt2, stride2x2);
jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
testl(cnt2, stride2x2-1); // cnt2 holds the vector count
@ -6844,7 +6855,7 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
testl(len, len);
jcc(Assembler::zero, FALSE_LABEL);

if ((UseAVX > 2) && // AVX512
if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {

@ -6917,7 +6928,7 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
} else {
movl(result, len); // copy

if (UseAVX == 2 && UseSSE >= 2) {
if (UseAVX >= 2 && UseSSE >= 2) {
// With AVX2, use 32-byte vector compare
Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

@ -7090,14 +7101,12 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar
lea(ary2, Address(ary2, limit, Address::times_1));
negptr(limit);

bind(COMPARE_WIDE_VECTORS);

#ifdef _LP64
if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;

cmpl(limit, -64);
jccb(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
jcc(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);

bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop

@ -7130,7 +7139,7 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar

}//if (VM_Version::supports_avx512vlbw())
#endif //_LP64

bind(COMPARE_WIDE_VECTORS);
vmovdqu(vec1, Address(ary1, limit, Address::times_1));
vmovdqu(vec2, Address(ary2, limit, Address::times_1));
vpxor(vec1, vec2);
@ -7356,32 +7365,33 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
assert( UseSSE >= 2, "supported cpu only" );
Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
movdl(xtmp, value);
if (UseAVX > 2 && UseUnalignedLoadStores) {
if (UseAVX >= 2 && UseUnalignedLoadStores) {
Label L_check_fill_32_bytes;
if (UseAVX > 2) {
// Fill 64-byte chunks
Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

// If number of bytes to fill < AVX3Threshold, perform fill using AVX2
cmpl(count, AVX3Threshold);
jccb(Assembler::below, L_check_fill_64_bytes_avx2);

vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

subl(count, 16 << shift);
jccb(Assembler::less, L_check_fill_32_bytes);
align(16);

BIND(L_fill_64_bytes_loop_avx3);
evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
addptr(to, 64);
subl(count, 16 << shift);
jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
jmpb(L_check_fill_32_bytes);

BIND(L_check_fill_64_bytes_avx2);
}
// Fill 64-byte chunks
Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

subl(count, 16 << shift);
jcc(Assembler::less, L_check_fill_32_bytes);
align(16);

BIND(L_fill_64_bytes_loop);
evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
addptr(to, 64);
subl(count, 16 << shift);
jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

BIND(L_check_fill_32_bytes);
addl(count, 8 << shift);
jccb(Assembler::less, L_check_fill_8_bytes);
vmovdqu(Address(to, 0), xtmp);
addptr(to, 32);
subl(count, 8 << shift);

BIND(L_check_fill_8_bytes);
} else if (UseAVX == 2 && UseUnalignedLoadStores) {
// Fill 64-byte chunks
Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
Label L_fill_64_bytes_loop;
vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

subl(count, 16 << shift);
@ -8095,12 +8105,13 @@ void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register
shlq(length);
xorq(result, result);

if ((UseAVX > 2) &&
if ((AVX3Threshold == 0) && (UseAVX > 2) &&
VM_Version::supports_avx512vlbw()) {
Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;

cmpq(length, 64);
jcc(Assembler::less, VECTOR32_TAIL);

movq(tmp1, length);
andq(tmp1, 0x3F); // tail count
andq(length, ~(0x3F)); //vector count
@ -9557,7 +9568,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
// save length for return
push(len);

if ((UseAVX > 2) && // AVX512
if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {

@ -9749,7 +9760,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
// }
void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
XMMRegister tmp1, Register tmp2) {
Label copy_chars_loop, done, below_threshold;
Label copy_chars_loop, done, below_threshold, avx3_threshold;
// rsi: src
// rdi: dst
// rdx: len
@ -9759,7 +9770,7 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
// rdi holds start addr of destination char[]
// rdx holds length
assert_different_registers(src, dst, len, tmp2);

movl(tmp2, len);
if ((UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
@ -9771,9 +9782,11 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
testl(len, -16);
jcc(Assembler::zero, below_threshold);

testl(len, -1 * AVX3Threshold);
jcc(Assembler::zero, avx3_threshold);

// In order to use only one arithmetic operation for the main loop we use
// this pre-calculation
movl(tmp2, len);
andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
andl(len, -32); // vector count
jccb(Assembler::zero, copy_tail);
@ -9804,12 +9817,11 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
evmovdquw(Address(dst, 0), k2, tmp1, Assembler::AVX_512bit);

jmp(done);
bind(avx3_threshold);
}
if (UseSSE42Intrinsics) {
Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;

movl(tmp2, len);

if (UseAVX > 1) {
andl(tmp2, (16 - 1));
andl(len, -16);
@ -9834,13 +9846,7 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len

bind(below_threshold);
bind(copy_new_tail);
if ((UseAVX > 2) &&
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
movl(tmp2, len);
} else {
movl(len, tmp2);
}
movl(len, tmp2);
andl(tmp2, 0x00000007);
andl(len, 0xFFFFFFF8);
jccb(Assembler::zero, copy_tail);
@ -1180,6 +1180,10 @@ public:
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src);

void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, AddressLiteral src);
@ -1288,30 +1288,58 @@ class StubGenerator: public StubCodeGenerator {
if (UseUnalignedLoadStores) {
Label L_end;
// Copy 64-bytes per iteration
__ BIND(L_loop);
if (UseAVX > 2) {
Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

__ BIND(L_copy_bytes);
__ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
__ jccb(Assembler::less, L_above_threshold);
__ jmpb(L_below_threshold);

__ bind(L_loop_avx512);
__ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
__ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
} else if (UseAVX == 2) {
__ bind(L_above_threshold);
__ addptr(qword_count, 8);
__ jcc(Assembler::lessEqual, L_loop_avx512);
__ jmpb(L_32_byte_head);

__ bind(L_loop_avx2);
__ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
__ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
__ bind(L_below_threshold);
__ addptr(qword_count, 8);
__ jcc(Assembler::lessEqual, L_loop_avx2);

__ bind(L_32_byte_head);
__ subptr(qword_count, 4); // sub(8) and add(4)
__ jccb(Assembler::greater, L_end);
} else {
__ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
__ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
__ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
__ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
__ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
__ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
__ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
__ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
__ BIND(L_loop);
if (UseAVX == 2) {
__ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
__ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
} else {
__ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
__ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
__ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
__ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
__ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
__ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
__ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
__ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
}

__ BIND(L_copy_bytes);
__ addptr(qword_count, 8);
__ jcc(Assembler::lessEqual, L_loop);
__ subptr(qword_count, 4); // sub(8) and add(4)
__ jccb(Assembler::greater, L_end);
}
__ BIND(L_copy_bytes);
__ addptr(qword_count, 8);
__ jcc(Assembler::lessEqual, L_loop);
__ subptr(qword_count, 4); // sub(8) and add(4)
__ jccb(Assembler::greater, L_end);
// Copy trailing 32 bytes
if (UseAVX >= 2) {
__ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@ -1368,31 +1396,59 @@ class StubGenerator: public StubCodeGenerator {
if (UseUnalignedLoadStores) {
Label L_end;
// Copy 64-bytes per iteration
__ BIND(L_loop);
if (UseAVX > 2) {
Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

__ BIND(L_copy_bytes);
__ cmpptr(qword_count, (AVX3Threshold / 8));
__ jccb(Assembler::greater, L_above_threshold);
__ jmpb(L_below_threshold);

__ BIND(L_loop_avx512);
__ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
__ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
} else if (UseAVX == 2) {
__ bind(L_above_threshold);
__ subptr(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_loop_avx512);
__ jmpb(L_32_byte_head);

__ bind(L_loop_avx2);
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
__ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
} else {
__ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
__ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
__ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
__ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
__ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
__ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
__ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
__ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
}
__ BIND(L_copy_bytes);
__ subptr(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_loop);
__ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
__ bind(L_below_threshold);
__ subptr(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_loop_avx2);

__ addptr(qword_count, 4); // add(8) and sub(4)
__ jccb(Assembler::less, L_end);
__ bind(L_32_byte_head);
__ addptr(qword_count, 4); // add(8) and sub(4)
__ jccb(Assembler::less, L_end);
} else {
__ BIND(L_loop);
if (UseAVX == 2) {
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
__ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
} else {
__ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
__ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
__ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
__ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
__ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
__ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
__ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
__ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
}

__ BIND(L_copy_bytes);
__ subptr(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_loop);

__ addptr(qword_count, 4); // add(8) and sub(4)
__ jccb(Assembler::less, L_end);
}
// Copy trailing 32 bytes
if (UseAVX >= 2) {
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
@ -381,6 +381,10 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ cmpl(rax, 0xE0);
__ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

__ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(rax, Address(rsi, 0));
__ cmpl(rax, 0x50654); // If it is Skylake
__ jcc(Assembler::equal, legacy_setup);
// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
// EVEX setup: run in lowest evex mode
@ -465,6 +469,11 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ cmpl(rax, 0xE0);
__ jcc(Assembler::notEqual, legacy_save_restore);

__ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(rax, Address(rsi, 0));
__ cmpl(rax, 0x50654); // If it is Skylake
__ jcc(Assembler::equal, legacy_save_restore);

// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
// EVEX check: run in lowest evex mode
@ -660,6 +669,9 @@ void VM_Version::get_processor_features() {
}
if (FLAG_IS_DEFAULT(UseAVX)) {
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
FLAG_SET_DEFAULT(UseAVX, 2); //Set UseAVX=2 for Skylake
}
} else if (UseAVX > use_avx_limit) {
warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
@ -1059,6 +1071,13 @@ void VM_Version::get_processor_features() {
}
#endif // COMPILER2 && ASSERT

if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
if (!is_power_of_2(AVX3Threshold)) {
warning("AVX3Threshold must be a power of 2");
FLAG_SET_DEFAULT(AVX3Threshold, 4096);
}
}

#ifdef _LP64
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;

@ -366,7 +366,7 @@ enum Extended_Family {
CPU_MODEL_HASWELL_E3 = 0x3c,
CPU_MODEL_HASWELL_E7 = 0x3f,
CPU_MODEL_BROADWELL = 0x3d,
CPU_MODEL_SKYLAKE = CPU_MODEL_HASWELL_E3
CPU_MODEL_SKYLAKE = 0x55
};

// cpuid information block. All info derived from executing cpuid with
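The AVX3Threshold validation above rejects non-power-of-two values so the stubs can mask with it cheaply. A sketch of that check, using the familiar x & (x - 1) trick that HotSpot's is_power_of_2 reduces to (the helper name below is illustrative, not the VM's API):

```cpp
#include <cstdio>

static bool is_pow2(int x) { return x > 0 && ((x & (x - 1)) == 0); }

// Mirrors the FLAG_IS_DEFAULT / warning / FLAG_SET_DEFAULT sequence above.
int validated_avx3_threshold(int requested) {
  if (!is_pow2(requested)) {
    fprintf(stderr, "AVX3Threshold must be a power of 2\n");
    return 4096;  // fall back to the default, as the diff does
  }
  return requested;
}
```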
@ -1097,138 +1097,6 @@ reg_class vectorz_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0
reg_class_dynamic vectorz_reg(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} );
reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} );

reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);

reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);

reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);

reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);

reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);

reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);

reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);

reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);

#ifdef _LP64

reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);

reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);

reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);

reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);

reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);

reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);

reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);

reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);

reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);

reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);

reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);

reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);

reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);

reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);

reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);

reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);

reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);

reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);

reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);

reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);

reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);

reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);

reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);

reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);

#endif

%}

@ -1485,6 +1353,10 @@ const bool Matcher::match_rule_supported(int opcode) {
ret_value = false;
}
break;
case Op_RoundDoubleMode:
if (UseSSE < 4)
ret_value = false;
break;
}

return ret_value; // Per default match rules are supported.
@ -1536,6 +1408,10 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
if (vlen != 4)
ret_value = false;
break;
case Op_RoundDoubleModeV:
if (VM_Version::supports_avx() == false)
ret_value = false;
break;
}
}

@ -1792,8 +1668,8 @@ static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo
return (UseAVX > 2) ? 6 : 4;
}

static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st) {
int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st) {
// In 64-bit VM size calculation is very complex. Emitting instructions
// into scratch buffer is used to get size in 64-bit VM.
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
@ -2854,6 +2730,110 @@ instruct sqrtD_imm(regD dst, immD con) %{
ins_pipe(pipe_slow);
%}

#ifdef _LP64
instruct roundD_reg(legRegD dst, legRegD src, immU8 rmode) %{
predicate(UseSSE>=4);
match(Set dst (RoundDoubleMode src rmode));
format %{ "roundsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ roundsd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant);
%}
ins_pipe(pipe_slow);
%}

instruct roundD_mem(legRegD dst, memory src, immU8 rmode) %{
predicate(UseSSE>=4);
match(Set dst (RoundDoubleMode (LoadD src) rmode));
format %{ "roundsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ roundsd($dst$$XMMRegister, $src$$Address, $rmode$$constant);
%}
ins_pipe(pipe_slow);
%}

instruct roundD_imm(legRegD dst, immD con, immU8 rmode, rRegI scratch_reg) %{
predicate(UseSSE>=4);
match(Set dst (RoundDoubleMode con rmode));
effect(TEMP scratch_reg);
format %{ "roundsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ roundsd($dst$$XMMRegister, $constantaddress($con), $rmode$$constant, $scratch_reg$$Register);
%}
ins_pipe(pipe_slow);
%}

instruct vround2D_reg(legVecX dst, legVecX src, immU8 rmode) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (RoundDoubleModeV src rmode));
format %{ "vroundpd $dst, $src, $rmode\t! round packed2D" %}
ins_encode %{
int vector_len = 0;
__ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vround2D_mem(legVecX dst, memory mem, immU8 rmode) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
format %{ "vroundpd $dst, $mem, $rmode\t! round packed2D" %}
ins_encode %{
int vector_len = 0;
__ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vround4D_reg(legVecY dst, legVecY src, legVecY rmode) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (RoundDoubleModeV src rmode));
format %{ "vroundpd $dst, $src, $rmode\t! round packed4D" %}
ins_encode %{
int vector_len = 1;
__ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vround4D_mem(legVecY dst, memory mem, immU8 rmode) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
format %{ "vroundpd $dst, $mem, $rmode\t! round packed4D" %}
ins_encode %{
int vector_len = 1;
__ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vround8D_reg(vecZ dst, vecZ src, immU8 rmode) %{
predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
match(Set dst (RoundDoubleModeV src rmode));
format %{ "vrndscalepd $dst, $src, $rmode\t! round packed8D" %}
ins_encode %{
int vector_len = 2;
__ vrndscalepd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vround8D_mem(vecZ dst, memory mem, immU8 rmode) %{
predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
format %{ "vrndscalepd $dst, $mem, $rmode\t! round packed8D" %}
ins_encode %{
int vector_len = 2;
__ vrndscalepd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
%}
ins_pipe( pipe_slow );
%}
#endif // _LP64

instruct onspinwait() %{
match(OnSpinWait);
ins_cost(200);
@ -3749,7 +3729,7 @@ instruct Repl16F_mem(legVecZ dst, memory mem) %{
%}

instruct Repl2F_zero(vecD dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
predicate(n->as_Vector()->length() == 2);
match(Set dst (ReplicateF zero));
format %{ "xorps $dst,$dst\t! replicate2F zero" %}
ins_encode %{
@ -3759,7 +3739,7 @@ instruct Repl2F_zero(vecD dst, immF0 zero) %{
%}

instruct Repl4F_zero(vecX dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
predicate(n->as_Vector()->length() == 4);
match(Set dst (ReplicateF zero));
format %{ "xorps $dst,$dst\t! replicate4F zero" %}
ins_encode %{
@ -3769,7 +3749,7 @@ instruct Repl4F_zero(vecX dst, immF0 zero) %{
%}

instruct Repl8F_zero(vecY dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 8 && UseAVX < 3);
predicate(n->as_Vector()->length() == 8 && UseAVX > 0);
match(Set dst (ReplicateF zero));
format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %}
ins_encode %{
@ -3843,7 +3823,7 @@ instruct Repl8D_mem(legVecZ dst, memory mem) %{

// Replicate double (8 byte) scalar zero to be vector
instruct Repl2D_zero(vecX dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
predicate(n->as_Vector()->length() == 2);
match(Set dst (ReplicateD zero));
format %{ "xorpd $dst,$dst\t! replicate2D zero" %}
ins_encode %{
@ -3853,7 +3833,7 @@ instruct Repl2D_zero(vecX dst, immD0 zero) %{
%}

instruct Repl4D_zero(vecY dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
predicate(n->as_Vector()->length() == 4 && UseAVX > 0);
match(Set dst (ReplicateD zero));
format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %}
ins_encode %{
@ -4778,42 +4758,6 @@ instruct Repl16F_mem_evex(vecZ dst, memory mem) %{
ins_pipe( pipe_slow );
%}

instruct Repl2F_zero_evex(vecD dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
match(Set dst (ReplicateF zero));
format %{ "vpxor $dst k0,$dst,$dst\t! replicate2F zero" %}
ins_encode %{
// Use vpxor in place of vxorps since EVEX has a constraint on dq for vxorps: this is a 512-bit operation
int vector_len = 2;
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( fpu_reg_reg );
%}

instruct Repl4F_zero_evex(vecX dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
match(Set dst (ReplicateF zero));
format %{ "vpxor $dst k0,$dst,$dst\t! replicate4F zero" %}
ins_encode %{
// Use vpxor in place of vxorps since EVEX has a constraint on dq for vxorps: this is a 512-bit operation
int vector_len = 2;
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( fpu_reg_reg );
%}

instruct Repl8F_zero_evex(vecY dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
match(Set dst (ReplicateF zero));
format %{ "vpxor $dst k0,$dst,$dst\t! replicate8F zero" %}
ins_encode %{
// Use vpxor in place of vxorps since EVEX has a constraint on dq for vxorps: this is a 512-bit operation
int vector_len = 2;
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( fpu_reg_reg );
%}

instruct Repl16F_zero_evex(vecZ dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
match(Set dst (ReplicateF zero));
@ -4870,30 +4814,6 @@ instruct Repl8D_mem_evex(vecZ dst, memory mem) %{
ins_pipe( pipe_slow );
%}

instruct Repl2D_zero_evex(vecX dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
match(Set dst (ReplicateD zero));
format %{ "vpxor $dst k0,$dst,$dst\t! replicate2D zero" %}
ins_encode %{
// Use vpxor in place of vxorpd since EVEX has a constraint on dq for vxorpd: this is a 512-bit operation
int vector_len = 2;
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( fpu_reg_reg );
%}

instruct Repl4D_zero_evex(vecY dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
match(Set dst (ReplicateD zero));
format %{ "vpxor $dst k0,$dst,$dst\t! replicate4D zero" %}
ins_encode %{
// Use vpxor in place of vxorpd since EVEX has a constraint on dq for vxorpd: this is a 512-bit operation
int vector_len = 2;
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
%}
ins_pipe( fpu_reg_reg );
%}

instruct Repl8D_zero_evex(vecZ dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
match(Set dst (ReplicateD zero));
@ -1058,8 +1058,8 @@ static enum RC rc_class(OptoReg::Name reg)
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);

static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);

static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
@ -4260,200 +4260,6 @@ operand cmpOpUCF2() %{
%}
%}

// Operands for bound floating pointer register arguments
operand rxmm0() %{
constraint(ALLOC_IN_RC(xmm0_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm1() %{
constraint(ALLOC_IN_RC(xmm1_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm2() %{
constraint(ALLOC_IN_RC(xmm2_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm3() %{
constraint(ALLOC_IN_RC(xmm3_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm4() %{
constraint(ALLOC_IN_RC(xmm4_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm5() %{
constraint(ALLOC_IN_RC(xmm5_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm6() %{
constraint(ALLOC_IN_RC(xmm6_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm7() %{
constraint(ALLOC_IN_RC(xmm7_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm8() %{
constraint(ALLOC_IN_RC(xmm8_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm9() %{
constraint(ALLOC_IN_RC(xmm9_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm10() %{
constraint(ALLOC_IN_RC(xmm10_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm11() %{
constraint(ALLOC_IN_RC(xmm11_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm12() %{
constraint(ALLOC_IN_RC(xmm12_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm13() %{
constraint(ALLOC_IN_RC(xmm13_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm14() %{
constraint(ALLOC_IN_RC(xmm14_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm15() %{
constraint(ALLOC_IN_RC(xmm15_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm16() %{
constraint(ALLOC_IN_RC(xmm16_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm17() %{
constraint(ALLOC_IN_RC(xmm17_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm18() %{
constraint(ALLOC_IN_RC(xmm18_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm19() %{
constraint(ALLOC_IN_RC(xmm19_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm20() %{
constraint(ALLOC_IN_RC(xmm20_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm21() %{
constraint(ALLOC_IN_RC(xmm21_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm22() %{
constraint(ALLOC_IN_RC(xmm22_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm23() %{
constraint(ALLOC_IN_RC(xmm23_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm24() %{
constraint(ALLOC_IN_RC(xmm24_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm25() %{
constraint(ALLOC_IN_RC(xmm25_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm26() %{
constraint(ALLOC_IN_RC(xmm26_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm27() %{
constraint(ALLOC_IN_RC(xmm27_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm28() %{
constraint(ALLOC_IN_RC(xmm28_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm29() %{
constraint(ALLOC_IN_RC(xmm29_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm30() %{
constraint(ALLOC_IN_RC(xmm30_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
operand rxmm31() %{
constraint(ALLOC_IN_RC(xmm31_reg));
match(VecX);
format%{%}
interface(REG_INTER);
%}
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify separate
@ -5346,6 +5152,7 @@ instruct loadRange(rRegI dst, memory mem)
instruct loadP(rRegP dst, memory mem)
%{
match(Set dst (LoadP mem));
predicate(n->as_Load()->barrier_data() == 0);

ins_cost(125); // XXX
format %{ "movq $dst, $mem\t# ptr" %}
@ -7794,6 +7601,7 @@ instruct storePConditional(memory heap_top_ptr,
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
@ -7845,7 +7653,7 @@ instruct compareAndSwapP(rRegI res,
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
predicate(VM_Version::supports_cx8());
predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
@ -8087,7 +7895,7 @@ instruct compareAndExchangeP(
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
predicate(VM_Version::supports_cx8());
predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
effect(KILL cr);

@ -8232,6 +8040,7 @@ instruct xchgL( memory mem, rRegL newval) %{

instruct xchgP( memory mem, rRegP newval) %{
match(Set newval (GetAndSetP mem newval));
predicate(n->as_LoadStore()->barrier_data() == 0);
format %{ "XCHGQ $newval,[$mem]" %}
ins_encode %{
__ xchgq($newval$$Register, $mem$$Address);
@ -11974,6 +11783,7 @@ instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
%{
match(Set cr (CmpP op1 (LoadP op2)));
predicate(n->in(2)->as_Load()->barrier_data() == 0);

ins_cost(500); // XXX
format %{ "cmpq $op1, $op2\t# ptr" %}
@ -11999,7 +11809,8 @@ instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none);
predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none &&
n->in(2)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP op1 (LoadP op2)));

format %{ "cmpq $op1, $op2\t# raw ptr" %}
@ -12024,7 +11835,8 @@ instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
// any compare to a zero should be eq/neq.
instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
%{
predicate(!UseCompressedOops || (CompressedOops::base() != NULL));
predicate((!UseCompressedOops || (CompressedOops::base() != NULL)) &&
n->in(1)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP (LoadP op) zero));

ins_cost(500); // XXX
@ -12037,7 +11849,9 @@ instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)

instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{
predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
predicate(UseCompressedOops && (CompressedOops::base() == NULL) &&
(CompressedKlassPointers::base() == NULL) &&
n->in(1)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP (LoadP mem) zero));

format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
@ -132,18 +132,6 @@ extern "C" int getargs(procsinfo*, int, char*, int);
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// excerpts from systemcfg.h that might be missing on older os levels
#ifndef PV_5_Compat
#define PV_5_Compat 0x0F8000 /* Power PC 5 */
#endif
#ifndef PV_6
#define PV_6 0x100000 /* Power PC 6 */
#endif
#ifndef PV_6_1
#define PV_6_1 0x100001 /* Power PC 6 DD1.x */
#endif
#ifndef PV_6_Compat
#define PV_6_Compat 0x108000 /* Power PC 6 */
#endif
#ifndef PV_7
#define PV_7 0x200000 /* Power PC 7 */
#endif
@ -156,6 +144,13 @@ extern "C" int getargs(procsinfo*, int, char*, int);
#ifndef PV_8_Compat
#define PV_8_Compat 0x308000 /* Power PC 8 */
#endif
#ifndef PV_9
#define PV_9 0x400000 /* Power PC 9 */
#endif
#ifndef PV_9_Compat
#define PV_9_Compat 0x408000 /* Power PC 9 */
#endif

static address resolve_function_descriptor_to_code_pointer(address p);
@ -1027,17 +1022,15 @@ void os::free_thread(OSThread* osthread) {
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
return (double)(os::elapsed_counter()) * 0.000001;
return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
timeval time;
int status = gettimeofday(&time, NULL);
return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
return javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
return (1000 * 1000);
return NANOSECS_PER_SEC; // nanosecond resolution
}

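The change above moves the elapsed-time bookkeeping from microseconds (gettimeofday) to a nanosecond counter: record one start stamp, then derive both the counter and the seconds value from it. A standalone sketch of the same scheme, with std::chrono::steady_clock standing in for javaTimeNanos() (names below are illustrative, not the AIX port's actual code):

```cpp
#include <chrono>
#include <cstdint>

static int64_t initial_time_count = 0;

static int64_t now_nanos() {
  using namespace std::chrono;
  return duration_cast<nanoseconds>(
      steady_clock::now().time_since_epoch()).count();
}

void init_clock()         { initial_time_count = now_nanos(); }     // like os::init
int64_t elapsed_counter() { return now_nanos() - initial_time_count; }
double elapsed_seconds()  { return double(elapsed_counter()) / 1e9; }  // freq = NANOSECS_PER_SEC
```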
bool os::supports_vtime() { return true; }
@ -1388,15 +1381,7 @@ void os::print_os_info_brief(outputStream* st) {
void os::print_os_info(outputStream* st) {
st->print("OS:");

st->print("uname:");
struct utsname name;
uname(&name);
st->print(name.sysname); st->print(" ");
st->print(name.nodename); st->print(" ");
st->print(name.release); st->print(" ");
st->print(name.version); st->print(" ");
st->print(name.machine);
st->cr();
os::Posix::print_uname_info(st);

uint32_t ver = os::Aix::os_version();
st->print_cr("AIX kernel version %u.%u.%u.%u",
@ -1404,16 +1389,12 @@ void os::print_os_info(outputStream* st) {

os::Posix::print_rlimit_info(st);

os::Posix::print_load_average(st);

// _SC_THREAD_THREADS_MAX is the maximum number of threads within a process.
long tmax = sysconf(_SC_THREAD_THREADS_MAX);
st->print_cr("maximum #threads within a process:%ld", tmax);

// load average
st->print("load average:");
double loadavg[3] = {-1.L, -1.L, -1.L};
os::loadavg(loadavg, 3);
st->print_cr("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);

// print wpar info
libperfstat::wparinfo_t wi;
if (libperfstat::get_wparinfo(&wi)) {
@ -1506,6 +1487,9 @@ void os::print_memory_info(outputStream* st) {
void os::get_summary_cpu_info(char* buf, size_t buflen) {
// read _system_configuration.version
switch (_system_configuration.version) {
case PV_9:
strncpy(buf, "Power PC 9", buflen);
break;
case PV_8:
strncpy(buf, "Power PC 8", buflen);
break;
@ -1539,6 +1523,9 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {
case PV_8_Compat:
strncpy(buf, "PV_8_Compat", buflen);
break;
case PV_9_Compat:
strncpy(buf, "PV_9_Compat", buflen);
break;
default:
strncpy(buf, "unknown", buflen);
}
@ -3498,7 +3485,7 @@ void os::init(void) {
// _main_thread points to the thread that created/loaded the JVM.
Aix::_main_thread = pthread_self();

initial_time_count = os::elapsed_counter();
initial_time_count = javaTimeNanos();

os::Posix::init();
}
@ -43,7 +43,7 @@ static bool map(uintptr_t start, size_t size) {

if ((uintptr_t)res != start) {
// Failed to reserve memory at the requested address
unmap(start, size);
unmap((uintptr_t)res, size);
return false;
}


@ -373,8 +373,12 @@ struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
void os::Posix::print_load_average(outputStream* st) {
st->print("load average:");
double loadavg[3];
os::loadavg(loadavg, 3);
st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
int res = os::loadavg(loadavg, 3);
if (res != -1) {
st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
} else {
st->print(" Unavailable");
}
st->cr();
}


@ -4159,128 +4159,135 @@ static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_
}
}

// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
// Creates an UNC path from a single byte path. Return buffer is
// allocated in C heap and needs to be freed by the caller.
// Returns NULL on error.
static wchar_t* create_unc_path(const char* path, errno_t &err) {
wchar_t* wpath = NULL;
size_t converted_chars = 0;
size_t path_len = strlen(path) + 1; // includes the terminating NULL
if (path[0] == '\\' && path[1] == '\\') {
if (path[2] == '?' && path[3] == '\\'){
// if it already has a \\?\ don't do the prefix
wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
if (wpath != NULL) {
err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
} else {
err = ENOMEM;
}
} else {
// only UNC pathname includes double slashes here
wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
if (wpath != NULL) {
::wcscpy(wpath, L"\\\\?\\UNC\0");
err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
} else {
err = ENOMEM;
}
}
// Returns the given path as an absolute wide path in unc format. The returned path is NULL
// on error (with err being set accordingly) and should be freed via os::free() otherwise.
// additional_space is the number of additionally allocated wchars after the terminating L'\0'.
// This is based on pathToNTPath() in io_util_md.cpp, but omits the optimizations for
// short paths.
static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
if ((path == NULL) || (path[0] == '\0')) {
err = ENOENT;
return NULL;
}

size_t path_len = strlen(path);
// Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
char* buf = (char*) os::malloc(1 + MAX2((size_t) 3, path_len), mtInternal);
wchar_t* result = NULL;

if (buf == NULL) {
err = ENOMEM;
} else {
wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
if (wpath != NULL) {
::wcscpy(wpath, L"\\\\?\\\0");
err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
memcpy(buf, path, path_len + 1);
os::native_path(buf);

wchar_t* prefix;
int prefix_off = 0;
bool is_abs = true;
bool needs_fullpath = true;

if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
prefix = L"\\\\?\\";
} else if (buf[0] == '\\' && buf[1] == '\\') {
if (buf[2] == '?' && buf[3] == '\\') {
prefix = L"";
needs_fullpath = false;
} else {
prefix = L"\\\\?\\UNC";
prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
}
} else {
is_abs = false;
prefix = L"\\\\?\\";
}

size_t buf_len = strlen(buf);
size_t prefix_len = wcslen(prefix);
size_t full_path_size = is_abs ? 1 + buf_len : JVM_MAXPATHLEN;
size_t result_size = prefix_len + full_path_size - prefix_off;
result = (wchar_t*) os::malloc(sizeof(wchar_t) * (additional_space + result_size), mtInternal);

if (result == NULL) {
err = ENOMEM;
} else {
size_t converted_chars;
wchar_t* path_start = result + prefix_len - prefix_off;
err = ::mbstowcs_s(&converted_chars, path_start, buf_len + 1, buf, buf_len);

if ((err == ERROR_SUCCESS) && needs_fullpath) {
wchar_t* tmp = (wchar_t*) os::malloc(sizeof(wchar_t) * full_path_size, mtInternal);

if (tmp == NULL) {
err = ENOMEM;
} else {
if (!_wfullpath(tmp, path_start, full_path_size)) {
err = ENOENT;
} else {
::memcpy(path_start, tmp, (1 + wcslen(tmp)) * sizeof(wchar_t));
}

os::free(tmp);
}
}

memcpy(result, prefix, sizeof(wchar_t) * prefix_len);

// Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
size_t result_len = wcslen(result);

if (result[result_len - 1] == L'\\') {
if (!(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
result[result_len - 1] = L'\0';
}
}
}
}
return wpath;
}

static void destroy_unc_path(wchar_t* wpath) {
os::free(wpath);
os::free(buf);

if (err != ERROR_SUCCESS) {
os::free(result);
result = NULL;
}

return result;
}

int os::stat(const char *path, struct stat *sbuf) {
char* pathbuf = (char*)os::strdup(path, mtInternal);
if (pathbuf == NULL) {
errno = ENOMEM;
errno_t err;
wchar_t* wide_path = wide_abs_unc_path(path, err);

if (wide_path == NULL) {
errno = err;
return -1;
}
os::native_path(pathbuf);
int ret;
WIN32_FILE_ATTRIBUTE_DATA file_data;
// Not using stat() to avoid the problem described in JDK-6539723
if (strlen(path) < MAX_PATH) {
BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
if (!bret) {
errno = ::GetLastError();
ret = -1;
}
else {
file_attribute_data_to_stat(sbuf, file_data);
ret = 0;
}
} else {
errno_t err = ERROR_SUCCESS;
wchar_t* wpath = create_unc_path(pathbuf, err);
if (err != ERROR_SUCCESS) {
if (wpath != NULL) {
destroy_unc_path(wpath);
}
os::free(pathbuf);
errno = err;
return -1;
}
BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
if (!bret) {
errno = ::GetLastError();
ret = -1;
} else {
file_attribute_data_to_stat(sbuf, file_data);
ret = 0;
}
destroy_unc_path(wpath);

WIN32_FILE_ATTRIBUTE_DATA file_data;
BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
os::free(wide_path);

if (!bret) {
errno = ::GetLastError();
return -1;
}
os::free(pathbuf);
return ret;

file_attribute_data_to_stat(sbuf, file_data);
return 0;
}

static HANDLE create_read_only_file_handle(const char* file) {
if (file == NULL) {
errno_t err;
wchar_t* wide_path = wide_abs_unc_path(file, err);

if (wide_path == NULL) {
errno = err;
return INVALID_HANDLE_VALUE;
}

char* nativepath = (char*)os::strdup(file, mtInternal);
if (nativepath == NULL) {
errno = ENOMEM;
return INVALID_HANDLE_VALUE;
}
os::native_path(nativepath);
HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
os::free(wide_path);

size_t len = strlen(nativepath);
HANDLE handle = INVALID_HANDLE_VALUE;

if (len < MAX_PATH) {
handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
} else {
errno_t err = ERROR_SUCCESS;
wchar_t* wfile = create_unc_path(nativepath, err);
if (err != ERROR_SUCCESS) {
if (wfile != NULL) {
destroy_unc_path(wfile);
}
os::free(nativepath);
return INVALID_HANDLE_VALUE;
}
handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
destroy_unc_path(wfile);
}

os::free(nativepath);
return handle;
}

@ -4329,7 +4336,6 @@ bool os::same_files(const char* file1, const char* file2) {
return result;
}


#define FT2INT64(ft) \
((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))

@ -4434,38 +4440,22 @@ bool os::dont_yield() {
return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

int os::open(const char *path, int oflag, int mode) {
char* pathbuf = (char*)os::strdup(path, mtInternal);
if (pathbuf == NULL) {
errno = ENOMEM;
errno_t err;
wchar_t* wide_path = wide_abs_unc_path(path, err);

if (wide_path == NULL) {
errno = err;
return -1;
}
os::native_path(pathbuf);
int ret;
if (strlen(path) < MAX_PATH) {
ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
} else {
errno_t err = ERROR_SUCCESS;
wchar_t* wpath = create_unc_path(pathbuf, err);
if (err != ERROR_SUCCESS) {
if (wpath != NULL) {
destroy_unc_path(wpath);
}
os::free(pathbuf);
errno = err;
return -1;
}
ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
if (ret == -1) {
errno = ::GetLastError();
}
destroy_unc_path(wpath);
int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
os::free(wide_path);

if (fd == -1) {
errno = ::GetLastError();
}
os::free(pathbuf);
return ret;

return fd;
}

FILE* os::open(int fd, const char* mode) {
@ -4474,37 +4464,26 @@ FILE* os::open(int fd, const char* mode) {

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
if (search_path == NULL) {
errno = ENOMEM;
return false;
}
strcpy(search_path, path);
os::native_path(search_path);
// Append "*", or possibly "\\*", to path
if (search_path[1] == ':' &&
(search_path[2] == '\0' ||
(search_path[2] == '\\' && search_path[3] == '\0'))) {
// No '\\' needed for cases like "Z:" or "Z:\"
strcat(search_path, "*");
}
else {
strcat(search_path, "\\*");
}
errno_t err = ERROR_SUCCESS;
wchar_t* wpath = create_unc_path(search_path, err);
if (err != ERROR_SUCCESS) {
if (wpath != NULL) {
destroy_unc_path(wpath);
}
os::free(search_path);
errno_t err;
wchar_t* wide_path = wide_abs_unc_path(path, err, 2);

if (wide_path == NULL) {
errno = err;
return false;
}

// Make sure we end with "\\*"
if (wide_path[wcslen(wide_path) - 1] == L'\\') {
wcscat(wide_path, L"*");
} else {
wcscat(wide_path, L"\\*");
}

WIN32_FIND_DATAW fd;
HANDLE f = ::FindFirstFileW(wpath, &fd);
destroy_unc_path(wpath);
HANDLE f = ::FindFirstFileW(wide_path, &fd);
os::free(wide_path);
bool is_empty = true;

if (f != INVALID_HANDLE_VALUE) {
while (is_empty && ::FindNextFileW(f, &fd)) {
// An empty directory contains only the current directory file
@ -4515,8 +4494,10 @@ bool os::dir_is_empty(const char* path) {
}
}
FindClose(f);
} else {
errno = ::GetLastError();
}
os::free(search_path);

return is_empty;
}


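
The Windows hunks above funnel every path through wide_abs_unc_path(), whose
prefixing rules the comments describe. A standalone sketch of just those
rules (illustration only; to_win_long_path is a hypothetical helper, and the
input is assumed to be already absolute and backslash-normalized, as it is
after os::native_path() and _wfullpath() in the patch):

#include <string>

std::wstring to_win_long_path(const std::wstring& p) {
  if (p.rfind(L"\\\\?\\", 0) == 0) {
    return p;                             // already \\?\-prefixed: leave as is
  }
  if (p.rfind(L"\\\\", 0) == 0) {
    return L"\\\\?\\UNC" + p.substr(1);   // \\server\share -> \\?\UNC\server\share
  }
  return L"\\\\?\\" + p;                  // C:\dir -> \\?\C:\dir
}
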
@ -773,11 +773,6 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
!strcmp(_matrule->_rChild->_opType,"CheckCastPP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
#if INCLUDE_ZGC
!strcmp(_matrule->_rChild->_opType,"ZGetAndSetP") ||
!strcmp(_matrule->_rChild->_opType,"ZCompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") ||
#endif
#if INCLUDE_SHENANDOAHGC
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
@ -3510,9 +3505,6 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"StoreCM",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
#if INCLUDE_ZGC
"ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
#endif
"ClearArray"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
@ -4047,6 +4039,7 @@ int MatchRule::is_expensive() const {
strcmp(opType,"FmaD") == 0 ||
strcmp(opType,"FmaF") == 0 ||
strcmp(opType,"RoundDouble")==0 ||
strcmp(opType,"RoundDoubleMode")==0 ||
strcmp(opType,"RoundFloat")==0 ||
strcmp(opType,"ReverseBytesI")==0 ||
strcmp(opType,"ReverseBytesL")==0 ||
@ -4175,7 +4168,7 @@ bool MatchRule::is_vector() const {
"URShiftVB","URShiftVS","URShiftVI","URShiftVL",
"MaxReductionV", "MinReductionV",
"ReplicateB","ReplicateS","ReplicateI","ReplicateL","ReplicateF","ReplicateD",
"LoadVector","StoreVector",
"RoundDoubleModeV","LoadVector","StoreVector",
"FmaVD", "FmaVF","PopCountVI",
// Next are not supported currently.
"PackB","PackS","PackI","PackL","PackF","PackD","Pack2L","Pack2D",

@ -37,6 +37,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

@ -32,6 +32,7 @@
#include "compiler/compilerOracle.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"

@ -33,13 +33,13 @@
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"


@ -36,6 +36,7 @@
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

@ -37,6 +37,7 @@
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
@ -55,7 +56,6 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"

@ -154,6 +154,7 @@ ciEnv::ciEnv(CompileTask* task)
_the_null_string = NULL;
_the_min_jint_string = NULL;

_jvmti_redefinition_count = 0;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
@ -209,6 +210,7 @@ ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
_the_null_string = NULL;
_the_min_jint_string = NULL;

_jvmti_redefinition_count = 0;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
@ -231,13 +233,20 @@ void ciEnv::cache_jvmti_state() {
VM_ENTRY_MARK;
// Get Jvmti capabilities under lock to get consistent values.
MutexLocker mu(JvmtiThreadState_lock);
_jvmti_redefinition_count = JvmtiExport::redefinition_count();
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
_jvmti_can_pop_frame = JvmtiExport::can_pop_frame();
_jvmti_can_get_owned_monitor_info = JvmtiExport::can_get_owned_monitor_info();
}

bool ciEnv::jvmti_state_changed() const {
// Some classes were redefined
if (_jvmti_redefinition_count != JvmtiExport::redefinition_count()) {
return true;
}

if (!_jvmti_can_access_local_variables &&
JvmtiExport::can_access_local_variables()) {
return true;
@ -254,6 +263,11 @@ bool ciEnv::jvmti_state_changed() const {
JvmtiExport::can_pop_frame()) {
return true;
}
if (!_jvmti_can_get_owned_monitor_info &&
JvmtiExport::can_get_owned_monitor_info()) {
return true;
}

return false;
}


@ -68,10 +68,12 @@ private:
int _name_buffer_len;

// Cache Jvmti state
uint64_t _jvmti_redefinition_count;
bool _jvmti_can_hotswap_or_post_breakpoint;
bool _jvmti_can_access_local_variables;
bool _jvmti_can_post_on_exceptions;
bool _jvmti_can_pop_frame;
bool _jvmti_can_get_owned_monitor_info; // includes can_get_owned_monitor_stack_depth_info

// Cache DTrace flags
bool _dtrace_extended_probes;
@ -346,6 +348,7 @@ public:
}
bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; }
bool jvmti_can_get_owned_monitor_info() const { return _jvmti_can_get_owned_monitor_info; }

// Cache DTrace flags
void cache_dtrace_flags();

@ -3004,7 +3004,7 @@ static const intArray* sort_methods(Array<Method*>* methods) {
// We temporarily use the vtable_index field in the Method* to store the
// class file index, so we can read in after calling qsort.
// Put the method ordering in the shared archive.
if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
for (int index = 0; index < length; index++) {
Method* const m = methods->at(index);
assert(!m->valid_vtable_index(), "vtable index should not be set");
@ -3018,7 +3018,7 @@ static const intArray* sort_methods(Array<Method*>* methods) {
intArray* method_ordering = NULL;
// If JVMTI original method ordering or sharing is enabled construct int
// array remembering the original ordering
if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
method_ordering = new intArray(length, length, -1);
for (int index = 0; index < length; index++) {
Method* const m = methods->at(index);

@ -57,7 +57,6 @@
#include "oops/symbol.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -462,7 +461,7 @@ bool ClassPathImageEntry::is_modules_image() const {

#if INCLUDE_CDS
void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only called at dump time");
Arguments::assert_is_dumping_archive();
tty->print_cr("Hint: enable -Xlog:class+path=info to diagnose the failure");
vm_exit_during_initialization(error, message);
}
@ -532,7 +531,7 @@ void ClassLoader::setup_bootstrap_search_path() {

#if INCLUDE_CDS
void ClassLoader::setup_app_search_path(const char *class_path) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Sanity");
Arguments::assert_is_dumping_archive();

ResourceMark rm;
ClasspathStream cp_stream(class_path);
@ -546,7 +545,7 @@ void ClassLoader::setup_app_search_path(const char *class_path) {
void ClassLoader::add_to_module_path_entries(const char* path,
ClassPathEntry* entry) {
assert(entry != NULL, "ClassPathEntry should not be NULL");
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
Arguments::assert_is_dumping_archive();

// The entry does not exist, add to the list
if (_module_path_entries == NULL) {
@ -560,7 +559,7 @@ void ClassLoader::add_to_module_path_entries(const char* path,

// Add a module path to the _module_path_entries list.
void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
Arguments::assert_is_dumping_archive();
struct stat st;
if (os::stat(path, &st) != 0) {
tty->print_cr("os::stat error %d (%s). CDS dump aborted (path was \"%s\").",
@ -656,7 +655,7 @@ void ClassLoader::setup_boot_search_path(const char *class_path) {
bool set_base_piece = true;

#if INCLUDE_CDS
if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
if (!Arguments::has_jimage()) {
vm_exit_during_initialization("CDS is not supported in exploded JDK build", NULL);
}
@ -1360,7 +1359,7 @@ char* ClassLoader::skip_uri_protocol(char* source) {
// Record the shared classpath index and loader type for classes loaded
// by the builtin loaders at dump time.
void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream, TRAPS) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "sanity");
Arguments::assert_is_dumping_archive();
assert(stream != NULL, "sanity");

if (ik->is_unsafe_anonymous()) {
@ -1537,13 +1536,13 @@ void ClassLoader::initialize() {

#if INCLUDE_CDS
void ClassLoader::initialize_shared_path() {
if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
ClassLoaderExt::setup_search_paths();
}
}

void ClassLoader::initialize_module_path(TRAPS) {
if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
ClassLoaderExt::setup_module_paths(THREAD);
FileMapInfo::allocate_shared_path_table();
}

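
Many hunks in this patch collapse the repeated "DumpSharedSpaces ||
DynamicDumpSharedSpaces" tests into Arguments::is_dumping_archive() and
Arguments::assert_is_dumping_archive(). The arguments.hpp side of the change
is not part of this excerpt; judging from the call sites, the helpers are
presumably shaped roughly like this sketch (DumpSharedSpaces and
DynamicDumpSharedSpaces are the existing HotSpot flags):

static bool is_dumping_archive() {
  return DumpSharedSpaces || DynamicDumpSharedSpaces;
}

static void assert_is_dumping_archive() {
  assert(is_dumping_archive(), "dump time only");
}
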
@ -26,6 +26,7 @@
#define SHARE_CLASSFILE_CLASSLOADER_HPP

#include "jimage.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
@ -236,6 +237,8 @@ class ClassLoader: AllStatic {
CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;})
CDS_ONLY(static ClassPathEntry* module_path_entries() {return _module_path_entries;})

static bool has_bootclasspath_append() { return _first_append_entry != NULL; }

protected:
// Initialization:
// - setup the boot loader's system class path
@ -395,8 +398,7 @@ class ClassLoader: AllStatic {
// Helper function used by CDS code to get the number of module path
// entries during shared classpath setup time.
static int num_module_path_entries() {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
"Should only be called at CDS dump time");
Arguments::assert_is_dumping_archive();
int num_entries = 0;
ClassPathEntry* e= ClassLoader::_module_path_entries;
while (e != NULL) {

@ -62,8 +62,7 @@ inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
// entries during shared classpath setup time.

inline int ClassLoader::num_boot_classpath_entries() {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
"Should only be called at CDS dump time");
Arguments::assert_is_dumping_archive();
assert(has_jrt_entry(), "must have a java runtime image");
int num_entries = 1; // count the runtime image
ClassPathEntry* e = ClassLoader::_first_append_entry;
@ -85,8 +84,7 @@ inline ClassPathEntry* ClassLoader::get_next_boot_classpath_entry(ClassPathEntry
// Helper function used by CDS code to get the number of app classpath
// entries during shared classpath setup time.
inline int ClassLoader::num_app_classpath_entries() {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
"Should only be called at CDS dump time");
Arguments::assert_is_dumping_archive();
int num_entries = 0;
ClassPathEntry* e= ClassLoader::_app_classpath_entries;
while (e != NULL) {

@ -129,8 +129,8 @@ void ClassLoaderData::initialize_name(Handle class_loader) {

ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :
_metaspace(NULL),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)),
_metaspace_lock(new Mutex(Mutex::leaf+1, "Metaspace allocation lock", true,
Mutex::_safepoint_check_never)),
_unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous),
_modified_oops(true), _accumulated_modified_oops(false),
// An unsafe anonymous class loader data doesn't have anything to keep

@ -62,8 +62,7 @@ void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) {
}

void ClassLoaderExt::setup_app_search_path() {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
"this function is only used at CDS dump time");
Arguments::assert_is_dumping_archive();
_app_class_paths_start_index = ClassLoader::num_boot_classpath_entries();
char* app_class_path = os::strdup(Arguments::get_appclasspath());

@ -92,8 +91,7 @@ void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) {
}
}
void ClassLoaderExt::setup_module_paths(TRAPS) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
"this function is only used with CDS dump time");
Arguments::assert_is_dumping_archive();
_app_module_paths_start_index = ClassLoader::num_boot_classpath_entries() +
ClassLoader::num_app_classpath_entries();
Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
@ -231,7 +229,7 @@ void ClassLoaderExt::setup_search_paths() {
void ClassLoaderExt::record_result(const s2 classpath_index,
InstanceKlass* result,
TRAPS) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Sanity");
Arguments::assert_is_dumping_archive();

// We need to remember where the class comes from during dumping.
oop loader = result->class_loader();

@ -42,7 +42,7 @@
//
CompactHashtableWriter::CompactHashtableWriter(int num_entries,
CompactHashtableStats* stats) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
Arguments::assert_is_dumping_archive();
assert(num_entries >= 0, "sanity");
_num_buckets = calculate_num_buckets(num_entries);
assert(_num_buckets > 0, "no buckets");

@ -246,7 +246,7 @@ void Dictionary::all_entries_do(KlassClosure* closure) {

// Used to scan and relocate the classes during CDS archive dump.
void Dictionary::classes_do(MetaspaceClosure* it) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
Arguments::assert_is_dumping_archive();
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;

@ -377,7 +377,7 @@ Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRA

if (_to_java_string_fn == NULL) {
void *lib_handle = os::native_java_library();
_to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "NewStringPlatform"));
_to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "JNU_NewStringPlatform"));
if (_to_java_string_fn == NULL) {
fatal("NewStringPlatform missing");
}

@ -202,7 +202,6 @@ class java_lang_String : AllStatic {

// Conversion between '.' and '/' formats
static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }

// Conversion
static Symbol* as_symbol(oop java_string);

@ -218,7 +218,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
JFR_ONLY(ON_KLASS_CREATION(result, parser, THREAD);)

#if INCLUDE_CDS
if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
ClassLoader::record_result(result, stream, THREAD);
}
#endif // INCLUDE_CDS

@ -220,7 +220,7 @@ Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap) {
assert (len <= Symbol::max_length(), "should be checked by caller");

Symbol* sym;
if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
c_heap = false;
}
if (c_heap) {
@ -283,7 +283,7 @@ public:
};

void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "called only during dump time");
Arguments::assert_is_dumping_archive();
MetaspacePointersDo mpd(it);
_local_table->do_safepoint_scan(mpd);
}

@ -1205,10 +1205,8 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
TempNewSymbol pkg_name = NULL;
PackageEntry* pkg_entry = NULL;
ModuleEntry* mod_entry = NULL;
const char* pkg_string = NULL;
pkg_name = InstanceKlass::package_from_name(class_name, CHECK_false);
if (pkg_name != NULL) {
pkg_string = pkg_name->as_C_string();
if (loader_data != NULL) {
pkg_entry = loader_data->packages()->lookup_only(pkg_name);
}
@ -1245,7 +1243,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
// 3. or, the class is from an unnamed module
if (!ent->is_modules_image() && ik->is_shared_boot_class()) {
// the class is from the -Xbootclasspath/a
if (pkg_string == NULL ||
if (pkg_name == NULL ||
pkg_entry == NULL ||
pkg_entry->in_unnamed_module()) {
assert(mod_entry == NULL ||
@ -1257,8 +1255,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
return false;
} else {
bool res = SystemDictionaryShared::is_shared_class_visible_for_classloader(
ik, class_loader, pkg_string, pkg_name,
pkg_entry, mod_entry, CHECK_(false));
ik, class_loader, pkg_name, pkg_entry, mod_entry, CHECK_(false));
return res;
}
}
@ -1432,6 +1429,11 @@ InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle
// a named package within the unnamed module. In all cases,
// limit visibility to search for the class only in the boot
// loader's append path.
if (!ClassLoader::has_bootclasspath_append()) {
// If there is no bootclasspath append entry, no need to continue
// searching.
return NULL;
}
search_only_bootloader_append = true;
}
}

@ -657,7 +657,6 @@ bool SystemDictionaryShared::is_sharing_possible(ClassLoaderData* loader_data) {
bool SystemDictionaryShared::is_shared_class_visible_for_classloader(
InstanceKlass* ik,
Handle class_loader,
const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,
@ -684,7 +683,7 @@ bool SystemDictionaryShared::is_shared_class_visible_for_classloader(
}
} else if (SystemDictionary::is_system_class_loader(class_loader())) {
assert(ent != NULL, "shared class for system loader should have valid SharedClassPathEntry");
if (pkg_string == NULL) {
if (pkg_name == NULL) {
// The archived class is in the unnamed package. Currently, the boot image
// does not contain any class in the unnamed package.
assert(!ent->is_modules_image(), "Class in the unnamed package must be from the classpath");
@ -906,14 +905,9 @@ InstanceKlass* SystemDictionaryShared::lookup_from_stream(Symbol* class_name,
return NULL;
}

const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, class_name);
const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, &_dynamic_unregistered_dictionary, class_name);
if (record == NULL) {
if (DynamicArchive::is_mapped()) {
record = find_record(&_dynamic_unregistered_dictionary, class_name);
}
if (record == NULL) {
return NULL;
}
return NULL;
}

int clsfile_size = cfs->length();
@ -1029,7 +1023,7 @@ DumpTimeSharedClassInfo* SystemDictionaryShared::find_or_allocate_info_for(Insta
}

void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only when dumping");
Arguments::assert_is_dumping_archive();
assert(!is_builtin(k), "must be unregistered class");
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->_clsfile_size = cfs->length();
@ -1185,7 +1179,7 @@ void SystemDictionaryShared::check_excluded_classes() {

bool SystemDictionaryShared::is_excluded_class(InstanceKlass* k) {
assert(_no_class_loading_should_happen, "sanity");
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only when dumping");
Arguments::assert_is_dumping_archive();
return find_or_allocate_info_for(k)->is_excluded();
}

@ -1209,7 +1203,7 @@ void SystemDictionaryShared::dumptime_classes_do(class MetaspaceClosure* it) {

bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "called at dump time only");
Arguments::assert_is_dumping_archive();
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->add_verification_constraint(k, name, from_name, from_field_is_protected,
from_is_array, from_is_object);
@ -1413,29 +1407,34 @@ void SystemDictionaryShared::serialize_dictionary_headers(SerializeClosure* soc,
}

const RunTimeSharedClassInfo*
SystemDictionaryShared::find_record(RunTimeSharedDictionary* dict, Symbol* name) {
if (UseSharedSpaces) {
unsigned int hash = primitive_hash<Symbol*>(name);
return dict->lookup(name, hash, 0);
} else {
SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTimeSharedDictionary* dynamic_dict, Symbol* name) {
if (!UseSharedSpaces || !name->is_shared()) {
// The names of all shared classes must also be shared Symbols.
return NULL;
}

unsigned int hash = primitive_hash<Symbol*>(name);
const RunTimeSharedClassInfo* record = NULL;
if (!MetaspaceShared::is_shared_dynamic(name)) {
// The names of all shared classes in the static dict must also be in the
// static archive
record = static_dict->lookup(name, hash, 0);
}

if (record == NULL && DynamicArchive::is_mapped()) {
record = dynamic_dict->lookup(name, hash, 0);
}

return record;
}

InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) {
const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, name);
if (record) {
const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, &_dynamic_builtin_dictionary, name);
if (record != NULL) {
return record->_klass;
} else {
return NULL;
}

if (DynamicArchive::is_mapped()) {
record = find_record(&_dynamic_builtin_dictionary, name);
if (record) {
return record->_klass;
}
}

return NULL;
}

void SystemDictionaryShared::update_shared_entry(InstanceKlass* k, int id) {

@ -223,7 +223,9 @@ private:
public:
static InstanceKlass* find_builtin_class(Symbol* class_name);

static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* dict, Symbol* name);
static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* static_dict,
RunTimeSharedDictionary* dynamic_dict,
Symbol* name);

static bool has_platform_or_app_classes();

@ -240,7 +242,6 @@ public:
static bool is_sharing_possible(ClassLoaderData* loader_data);
static bool is_shared_class_visible_for_classloader(InstanceKlass* ik,
Handle class_loader,
const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,

@ -28,6 +28,7 @@
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/handles.inline.hpp"

VerificationType VerificationType::from_tag(u1 tag) {
@ -94,7 +95,7 @@ bool VerificationType::is_reference_assignable_from(
return true;
}

if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
if (Arguments::is_dumping_archive()) {
if (SystemDictionaryShared::add_verification_constraint(klass,
name(), from.name(), from_field_is_protected, from.is_array(),
from.is_object())) {

@ -63,29 +63,39 @@
#define STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION 52
#define MAX_ARRAY_DIMENSIONS 255

// Access to external entry for VerifyClassCodes - old byte code verifier
// Access to external entry for VerifyClassForMajorVersion - old byte code verifier

extern "C" {
typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint);
typedef jboolean (*verify_byte_codes_fn_new_t)(JNIEnv *, jclass, char *, jint, jint);
typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint, jint);
}

static void* volatile _verify_byte_codes_fn = NULL;
static verify_byte_codes_fn_t volatile _verify_byte_codes_fn = NULL;

static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
static verify_byte_codes_fn_t verify_byte_codes_fn() {

static void* verify_byte_codes_fn() {
if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
void *lib_handle = os::native_java_library();
void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
OrderAccess::release_store(&_verify_byte_codes_fn, func);
if (func == NULL) {
_is_new_verify_byte_codes_fn = false;
func = os::dll_lookup(lib_handle, "VerifyClassCodes");
OrderAccess::release_store(&_verify_byte_codes_fn, func);
}
}
return (void*)_verify_byte_codes_fn;
if (_verify_byte_codes_fn != NULL)
return _verify_byte_codes_fn;

MutexLocker locker(Verify_lock);

if (_verify_byte_codes_fn != NULL)
return _verify_byte_codes_fn;

// Load verify dll
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
if (!os::dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify"))
return NULL; // Caller will throw VerifyError

void *lib_handle = os::dll_load(buffer, ebuf, sizeof(ebuf));
if (lib_handle == NULL)
return NULL; // Caller will throw VerifyError

void *fn = os::dll_lookup(lib_handle, "VerifyClassForMajorVersion");
if (fn == NULL)
return NULL; // Caller will throw VerifyError

return _verify_byte_codes_fn = CAST_TO_FN_PTR(verify_byte_codes_fn_t, fn);
}


@ -282,7 +292,7 @@ Symbol* Verifier::inference_verify(
JavaThread* thread = (JavaThread*)THREAD;
JNIEnv *env = thread->jni_environment();

void* verify_func = verify_byte_codes_fn();
verify_byte_codes_fn_t verify_func = verify_byte_codes_fn();

if (verify_func == NULL) {
jio_snprintf(message, message_len, "Could not link verifier");
@ -301,16 +311,7 @@ Symbol* Verifier::inference_verify(
// ThreadToNativeFromVM takes care of changing thread_state, so safepoint
// code knows that we have left the VM

if (_is_new_verify_byte_codes_fn) {
verify_byte_codes_fn_new_t func =
CAST_TO_FN_PTR(verify_byte_codes_fn_new_t, verify_func);
result = (*func)(env, cls, message, (int)message_len,
klass->major_version());
} else {
verify_byte_codes_fn_t func =
CAST_TO_FN_PTR(verify_byte_codes_fn_t, verify_func);
result = (*func)(env, cls, message, (int)message_len);
}
result = (*verify_func)(env, cls, message, (int)message_len, klass->major_version());
}

JNIHandles::destroy_local(cls);

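
The verifier hunk above drops the lock-free load_acquire/release_store
publication in favor of a fast-path check, Verify_lock, and a re-check under
the lock. A simplified standalone sketch of that shape (std::mutex stands in
for HotSpot's Verify_lock, and lookup_fn() for the dll_locate_lib/dll_load/
dll_lookup sequence; the unsynchronized fast-path read is acceptable in the
VM's memory model, and is kept here only to mirror the structure):

#include <mutex>

typedef int (*fn_t)(int);

static int real_fn(int x) { return x; }       // placeholder for the dll entry point
static fn_t lookup_fn() { return &real_fn; }  // stands in for the expensive lookup

static fn_t cached_fn = nullptr;
static std::mutex init_lock;

fn_t get_fn() {
  if (cached_fn != nullptr) return cached_fn; // fast path, no lock
  std::lock_guard<std::mutex> guard(init_lock);
  if (cached_fn != nullptr) return cached_fn; // re-check under the lock
  return cached_fn = lookup_fn();             // look up and publish once
}
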
@ -575,6 +575,9 @@ bool vmIntrinsics::is_disabled_by_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_intBitsToFloat:
case vmIntrinsics::_doubleToRawLongBits:
case vmIntrinsics::_longBitsToDouble:
case vmIntrinsics::_ceil:
case vmIntrinsics::_floor:
case vmIntrinsics::_rint:
case vmIntrinsics::_dabs:
case vmIntrinsics::_fabs:
case vmIntrinsics::_iabs:

@ -766,6 +766,7 @@
do_name(tan_name,"tan") do_name(atan2_name,"atan2") do_name(sqrt_name,"sqrt") \
do_name(log_name,"log") do_name(log10_name,"log10") do_name(pow_name,"pow") \
do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \
do_name(floor_name, "floor") do_name(ceil_name, "ceil") do_name(rint_name, "rint") \
\
do_name(addExact_name,"addExact") \
do_name(decrementExact_name,"decrementExact") \
@ -781,6 +782,9 @@
do_intrinsic(_iabs, java_lang_Math, abs_name, int_int_signature, F_S) \
do_intrinsic(_labs, java_lang_Math, abs_name, long_long_signature, F_S) \
do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \
do_intrinsic(_floor, java_lang_Math, floor_name, double_double_signature, F_S) \
do_intrinsic(_ceil, java_lang_Math, ceil_name, double_double_signature, F_S) \
do_intrinsic(_rint, java_lang_Math, rint_name, double_double_signature, F_S) \
do_intrinsic(_dcos, java_lang_Math, cos_name, double_double_signature, F_S) \
do_intrinsic(_dtan, java_lang_Math, tan_name, double_double_signature, F_S) \
do_intrinsic(_datan2, java_lang_Math, atan2_name, double2_double_signature, F_S) \

@ -33,6 +33,7 @@
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
@ -46,7 +47,6 @@
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"

@ -741,4 +741,22 @@ void CompiledDirectStaticCall::print() {
tty->cr();
}

void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) {
// A generated lambda form might be deleted from the Lambdaform
// cache in MethodTypeForm. If a jit compiled lambdaform method
// becomes not entrant and the cache access returns null, the new
// resolve will lead to a new generated LambdaForm.
Method* old_method = reinterpret_cast<Method*>(method_holder->data());
assert(old_method == NULL || old_method == callee() ||
callee->is_compiled_lambda_form() ||
!old_method->method_holder()->is_loader_alive() ||
old_method->is_old(), // may be race patching deoptimized nmethod due to redefinition.
"a) MT-unsafe modification of inline cache");

address destination = jump->jump_destination();
assert(destination == (address)-1 || destination == entry,
"b) MT-unsafe modification of inline cache");
}
#endif // !PRODUCT

@ -402,6 +402,9 @@ private:

// Also used by CompiledIC
void set_to_interpreted(const methodHandle& callee, address entry);
void verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) PRODUCT_RETURN;
#if INCLUDE_AOT
void set_to_far(const methodHandle& callee, address entry);
#endif

@ -1293,7 +1293,6 @@ void nmethod::unlink_from_method() {
*/
bool nmethod::make_not_entrant_or_zombie(int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
assert(!is_zombie(), "should not already be a zombie");

if (Atomic::load(&_state) >= state) {
// Avoid taking the lock if already in required state.
@ -1316,20 +1315,18 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
// This flag is used to remember whether we need to later lock and unregister.
bool nmethod_needs_unregister = false;

// invalidate osr nmethod before acquiring the patching lock since
// they both acquire leaf locks and we don't want a deadlock.
// This logic is equivalent to the logic below for patching the
// verified entry point of regular methods. We check that the
// nmethod is in use to ensure that it is invalidated only once.
if (is_osr_method() && is_in_use()) {
// this effectively makes the osr nmethod not entrant
invalidate_osr_method();
}

{
// Enter critical section. Does not block for safepoint.
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

// This logic is equivalent to the logic below for patching the
// verified entry point of regular methods. We check that the
// nmethod is in use to ensure that it is invalidated only once.
if (is_osr_method() && is_in_use()) {
// this effectively makes the osr nmethod not entrant
invalidate_osr_method();
}

if (Atomic::load(&_state) >= state) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
@ -2192,6 +2189,17 @@ public:
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

class VerifyMetadataClosure: public MetadataClosure {
public:
void do_metadata(Metadata* md) {
if (md->is_method()) {
Method* method = (Method*)md;
assert(!method->is_old(), "Should not be installing old methods");
}
}
};


void nmethod::verify() {

// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
@ -2255,6 +2263,10 @@ void nmethod::verify() {
Universe::heap()->verify_nmethod(this);

verify_scopes();

CompiledICLocker nm_verify(this);
VerifyMetadataClosure vmc;
metadata_do(&vmc);
}



@ -27,19 +27,18 @@
|
||||
#include "code/compiledIC.hpp"
|
||||
#include "code/nmethod.hpp"
|
||||
#include "code/scopeDesc.hpp"
|
||||
#include "compiler/compilationPolicy.hpp"
|
||||
#include "compiler/tieredThresholdPolicy.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "oops/method.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/nativeLookup.hpp"
|
||||
#include "runtime/compilationPolicy.hpp"
|
||||
#include "runtime/frame.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/rframe.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#include "runtime/tieredThresholdPolicy.hpp"
|
||||
#include "runtime/vframe.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
@ -56,28 +55,16 @@ CompilationPolicy* CompilationPolicy::_policy;
|
||||
|
||||
// Determine compilation policy based on command line argument
|
||||
void compilationPolicy_init() {
|
||||
switch(CompilationPolicyChoice) {
|
||||
case 0:
|
||||
CompilationPolicy::set_policy(new SimpleCompPolicy());
|
||||
break;
|
||||
|
||||
case 1:
|
||||
#ifdef COMPILER2
|
||||
CompilationPolicy::set_policy(new StackWalkCompPolicy());
|
||||
#else
|
||||
Unimplemented();
|
||||
#endif
|
||||
break;
|
||||
case 2:
|
||||
#ifdef TIERED
|
||||
#ifdef TIERED
|
||||
if (TieredCompilation) {
|
||||
CompilationPolicy::set_policy(new TieredThresholdPolicy());
|
||||
#else
|
||||
Unimplemented();
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
fatal("CompilationPolicyChoice must be in the range: [0-2]");
|
||||
} else {
|
||||
CompilationPolicy::set_policy(new SimpleCompPolicy());
|
||||
}
|
||||
#else
|
||||
CompilationPolicy::set_policy(new SimpleCompPolicy());
|
||||
#endif
|
||||
|
||||
CompilationPolicy::policy()->initialize();
|
||||
}
|
||||
|
||||
@ -204,7 +191,7 @@ CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue)
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
|
||||
void SimpleCompPolicy::trace_osr_completion(nmethod* osr_nm) {
|
||||
if (TraceOnStackReplacement) {
|
||||
if (osr_nm == NULL) tty->print_cr("compilation failed");
|
||||
else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
|
||||
@ -212,7 +199,7 @@ void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
void NonTieredCompPolicy::initialize() {
|
||||
void SimpleCompPolicy::initialize() {
|
||||
// Setup the compiler thread numbers
|
||||
if (CICompilerCountPerCPU) {
|
||||
// Example: if CICompilerCountPerCPU is true, then we get
|
||||
@ -246,7 +233,7 @@ void NonTieredCompPolicy::initialize() {
|
||||
// - with COMPILER2 not defined it should return zero for c2 compilation levels.
|
||||
// - with COMPILER1 not defined it should return zero for c1 compilation levels.
|
||||
// - if neither is defined - always return zero.
|
||||
int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
|
||||
int SimpleCompPolicy::compiler_count(CompLevel comp_level) {
|
||||
assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
|
||||
if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
|
||||
is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
|
||||
@ -255,7 +242,7 @@ int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
|
||||
void SimpleCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
|
||||
// Make sure invocation and backedge counter doesn't overflow again right away
|
||||
// as would be the case for native methods.
|
||||
|
||||
@ -269,7 +256,7 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle&
|
||||
assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
|
||||
}
|
||||
|
||||
void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
|
||||
void SimpleCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
|
||||
// Delay next back-branch event but pump up invocation counter to trigger
|
||||
// whole method compilation.
|
||||
MethodCounters* mcs = m->method_counters();
|
||||
@ -327,13 +314,13 @@ void CounterDecay::decay() {
|
||||
}
|
||||
|
||||
// Called at the end of the safepoint
|
||||
void NonTieredCompPolicy::do_safepoint_work() {
|
||||
void SimpleCompPolicy::do_safepoint_work() {
|
||||
if(UseCounterDecay && CounterDecay::is_decay_needed()) {
|
||||
CounterDecay::decay();
|
||||
}
|
||||
}
|
||||
|
||||
void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
|
||||
void SimpleCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
|
||||
ScopeDesc* sd = trap_scope;
|
||||
MethodCounters* mcs;
|
||||
InvocationCounter* c;
|
||||
@ -359,7 +346,7 @@ void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {

// This method can be called by any component of the runtime to notify the policy
// that it's recommended to delay the compilation of this method.
void NonTieredCompPolicy::delay_compilation(Method* method) {
void SimpleCompPolicy::delay_compilation(Method* method) {
MethodCounters* mcs = method->method_counters();
if (mcs != NULL) {
mcs->invocation_counter()->decay();
@ -367,7 +354,7 @@ void NonTieredCompPolicy::delay_compilation(Method* method) {
}
}

void NonTieredCompPolicy::disable_compilation(Method* method) {
void SimpleCompPolicy::disable_compilation(Method* method) {
MethodCounters* mcs = method->method_counters();
if (mcs != NULL) {
mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
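
Note the asymmetry above: delay_compilation() only decays the counter, while disable_compilation() parks it in the wait_for_nothing state so a later counter overflow triggers no compile request at all. A hedged sketch of that state idea, with a simplified stand-in for InvocationCounter:

    enum class CounterState { wait_for_nothing, wait_for_compile };

    struct ToyInvocationCounter {
      int count = 0;
      CounterState state = CounterState::wait_for_compile;

      void set_state(CounterState s) { state = s; }

      void on_overflow() {
        if (state == CounterState::wait_for_nothing) return;  // compilation disabled
        // ... otherwise a compile of the owning method would be requested ...
      }
    };
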
@ -375,11 +362,11 @@ void NonTieredCompPolicy::disable_compilation(Method* method) {
}
}

CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
CompileTask* SimpleCompPolicy::select_task(CompileQueue* compile_queue) {
return select_task_helper(compile_queue);
}

bool NonTieredCompPolicy::is_mature(Method* method) {
bool SimpleCompPolicy::is_mature(Method* method) {
MethodData* mdo = method->method_data();
assert(mdo != NULL, "Should be");
uint current = mdo->mileage_of(method);
@ -394,7 +381,7 @@ bool NonTieredCompPolicy::is_mature(Method* method) {
return (current >= initial + target);
}

nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
nmethod* SimpleCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
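
The maturity test above says a method's profile is trustworthy once its "mileage" (a blend of invocation and backedge counts) has advanced target units beyond where profiling started. Restated as a tiny function with made-up numbers:

    // e.g. current = 1500, initial = 100, target = 1000  ->  mature
    //      current =  600, initial = 100, target = 1000  ->  not yet
    static bool is_mature_sketch(unsigned current, unsigned initial, unsigned target) {
      return current >= initial + target;
    }
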
@ -453,7 +440,7 @@ nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHand
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
void SimpleCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
if (TraceInvocationCounterOverflow) {
MethodCounters* mcs = m->method_counters();
assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
@ -485,7 +472,7 @@ void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m
}
}

void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
void SimpleCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
if (TraceOnStackReplacement) {
ResourceMark rm;
tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
@ -495,8 +482,6 @@ void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod*
}
#endif // !PRODUCT

// SimpleCompPolicy - compile current method

void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
const int comp_level = CompLevel_highest_tier;
const int hot_count = m->invocation_count();
@ -519,208 +504,3 @@ void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci,
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
}
}
// StackWalkCompPolicy - walk up stack to find a suitable method to compile

#ifdef COMPILER2
const char* StackWalkCompPolicy::_msg = NULL;


// Consider m for compilation
void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
const int comp_level = CompLevel_highest_tier;
const int hot_count = m->invocation_count();
reset_counter_for_invocation_event(m);

if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
ResourceMark rm(thread);
frame fr = thread->last_frame();
assert(fr.is_interpreted_frame(), "must be interpreted");
assert(fr.interpreter_frame_method() == m(), "bad method");

RegisterMap reg_map(thread, false);
javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
// triggerVF is the frame that triggered its counter
RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());

if (first->top_method()->code() != NULL) {
// called obsolete method/nmethod -- no need to recompile
} else {
GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
stack->push(first);
RFrame* top = findTopInlinableFrame(stack);
assert(top != NULL, "findTopInlinableFrame returned null");
CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
m, hot_count, CompileTask::Reason_InvocationCount, thread);
}
}
}

void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
const int comp_level = CompLevel_highest_tier;
const int hot_count = m->backedge_count();

if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
}
}

RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
// go up the stack until finding a frame that (probably) won't be inlined
// into its caller
RFrame* current = stack->at(0); // current choice for stopping
assert( current && !current->is_compiled(), "" );
const char* msg = NULL;

while (1) {

// before going up the stack further, check if doing so would get us into
// compiled code
RFrame* next = senderOf(current, stack);
if( !next ) // No next frame up the stack?
break; // Then compile with current frame

Method* m = current->top_method();
Method* next_m = next->top_method();

if( !Inline ) { // Inlining turned off
msg = "Inlining turned off";
break;
}
if (next_m->is_not_compilable()) { // Did fail to compile this before?
msg = "caller not compilable";
break;
}
if (next->num() > MaxRecompilationSearchLength) {
// don't go up too high when searching for recompilees
msg = "don't go up any further: > MaxRecompilationSearchLength";
break;
}
if (next->distance() > MaxInterpretedSearchLength) {
// don't go up too high when searching for recompilees
msg = "don't go up any further: next > MaxInterpretedSearchLength";
break;
}
// Compiled frame above already decided not to inline;
// do not recompile him.
if (next->is_compiled()) {
msg = "not going up into optimized code";
break;
}

// Interpreted frame above us was already compiled. Do not force
// a recompile, although if the frame above us runs long enough an
// OSR might still happen.
if( current->is_interpreted() && next_m->has_compiled_code() ) {
msg = "not going up -- already compiled caller";
break;
}

// Compute how frequent this call site is. We have current method 'm'.
// We know next method 'next_m' is interpreted. Find the call site and
// check the various invocation counts.
int invcnt = 0; // Caller counts
if (ProfileInterpreter) {
invcnt = next_m->interpreter_invocation_count();
}
int cnt = 0; // Call site counts
if (ProfileInterpreter && next_m->method_data() != NULL) {
ResourceMark rm;
int bci = next->top_vframe()->bci();
ProfileData* data = next_m->method_data()->bci_to_data(bci);
if (data != NULL && data->is_CounterData())
cnt = data->as_CounterData()->count();
}

// Caller counts / call-site counts; i.e. is this call site
// a hot call site for method next_m?
int freq = (invcnt) ? cnt/invcnt : cnt;

// Check size and frequency limits
if ((msg = shouldInline(m, freq, cnt)) != NULL) {
break;
}
// Check inlining negative tests
if ((msg = shouldNotInline(m)) != NULL) {
break;
}


// If the caller method is too big or something then we do not want to
// compile it just to inline a method
if (!can_be_compiled(next_m, CompLevel_any)) {
msg = "caller cannot be compiled";
break;
}

if( next_m->name() == vmSymbols::class_initializer_name() ) {
msg = "do not compile class initializer (OSR ok)";
break;
}

current = next;
}

assert( !current || !current->is_compiled(), "" );

return current;
}

RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
RFrame* sender = rf->caller();
if (sender && sender->num() == stack->length()) stack->push(sender);
return sender;
}


const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
// Allows targeted inlining
// positive filter: should send be inlined? returns NULL (--> yes)
// or rejection msg
int max_size = MaxInlineSize;
int cost = m->code_size();

// Check for too many throws (and not too huge)
if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) {
return NULL;
}

// bump the max size if the call is frequent
if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
if (TraceFrequencyInlining) {
tty->print("(Inlined frequent method)\n");
m->print();
}
max_size = FreqInlineSize;
}
if (cost > max_size) {
return (_msg = "too big");
}
return NULL;
}


const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
// negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
if (m->is_abstract()) return (_msg = "abstract method");
// note: we allow ik->is_abstract()
if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
if (m->is_native()) return (_msg = "native method");
CompiledMethod* m_code = m->code();
if (m_code != NULL && m_code->code_size() > InlineSmallCode)
return (_msg = "already compiled into a big method");

// use frequency-based objections only for non-trivial methods
if (m->code_size() <= MaxTrivialSize) return NULL;
if (UseInterpreter) { // don't use counts with -Xcomp
if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
}
if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");

return NULL;
}



#endif // COMPILER2
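
The removed StackWalkCompPolicy deserves one note: in findTopInlinableFrame(), freq = cnt/invcnt is integer division of the call-site count by the caller's invocation count, so a site's ratio rounds to zero unless it fires at least as often as its caller is invoked (cnt = 50, invcnt = 200 gives freq 0; cnt = 400, invcnt = 100 gives 4). shouldInline() then grows the size budget from MaxInlineSize to FreqInlineSize for such sites. A standalone restatement of that test (the numeric defaults are placeholders, not the VM's real flag values):

    static const int kMaxInlineSize = 35;          // placeholder for MaxInlineSize
    static const int kFreqInlineSize = 325;        // placeholder for FreqInlineSize
    static const int kInlineFrequencyRatio = 20;   // placeholder for InlineFrequencyRatio
    static const int kInlineFrequencyCount = 100;  // placeholder for InlineFrequencyCount

    // Returns nullptr to mean "inline", otherwise a rejection message, matching
    // the convention of the removed code.
    static const char* should_inline_sketch(int code_size, int invcnt, int cnt) {
      int freq = (invcnt != 0) ? cnt / invcnt : cnt;
      int max_size = kMaxInlineSize;
      if (freq >= kInlineFrequencyRatio || cnt >= kInlineFrequencyCount) {
        max_size = kFreqInlineSize;  // hot call sites get the bigger budget
      }
      return (code_size > max_size) ? "too big" : nullptr;
    }
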
@ -22,8 +22,8 @@
*
*/

#ifndef SHARE_RUNTIME_COMPILATIONPOLICY_HPP
#define SHARE_RUNTIME_COMPILATIONPOLICY_HPP
#ifndef SHARE_COMPILER_COMPILATIONPOLICY_HPP
#define SHARE_COMPILER_COMPILATIONPOLICY_HPP

#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
@ -36,7 +36,6 @@
// interpreted).
class CompileTask;
class CompileQueue;
class RFrame;

class CompilationPolicy : public CHeapObj<mtCompiler> {
static CompilationPolicy* _policy;
@ -85,17 +84,19 @@ public:
virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
};

// A base class for baseline policies.
class NonTieredCompPolicy : public CompilationPolicy {
// A simple compilation policy.
class SimpleCompPolicy : public CompilationPolicy {
int _compiler_count;
protected:
private:
static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci);
static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci);
static void trace_osr_completion(nmethod* osr_nm);
void reset_counter_for_invocation_event(const methodHandle& method);
void reset_counter_for_back_branch_event(const methodHandle& method);
public:
NonTieredCompPolicy() : _compiler_count(0) { }
void method_invocation_event(const methodHandle& m, JavaThread* thread);
void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
public:
SimpleCompPolicy() : _compiler_count(0) { }
virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
virtual int compiler_count(CompLevel comp_level);
virtual void do_safepoint_work();
@ -106,38 +107,7 @@ public:
virtual void initialize();
virtual CompileTask* select_task(CompileQueue* compile_queue);
virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
virtual void method_invocation_event(const methodHandle& m, JavaThread* thread) = 0;
virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) = 0;
};

class SimpleCompPolicy : public NonTieredCompPolicy {
public:
virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
};

// StackWalkCompPolicy - existing C2 policy

#ifdef COMPILER2
class StackWalkCompPolicy : public NonTieredCompPolicy {
public:
virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);

private:
RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
RFrame* senderOf(RFrame* rf, GrowableArray<RFrame*>* stack);

// the following variables hold values computed by the last inlining decision
// they are used for performance debugging only (print better messages)
static const char* _msg; // reason for not inlining

static const char* shouldInline (const methodHandle& callee, float frequency, int cnt);
// positive filter: should send be inlined? returns NULL (--> yes) or rejection msg
static const char* shouldNotInline(const methodHandle& callee);
// negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg

};
#endif

#endif // SHARE_RUNTIME_COMPILATIONPOLICY_HPP
#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP
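
Net effect of the header change: the three-level hierarchy (CompilationPolicy <- NonTieredCompPolicy <- SimpleCompPolicy / StackWalkCompPolicy) collapses to a single concrete subclass, and the event hooks that were pure virtual on the middle class become private members. A skeleton of the resulting shape (abbreviated stand-in names, not the real declarations):

    struct PolicyBase {                 // stand-in for CompilationPolicy
      virtual ~PolicyBase() {}
    };

    struct SimplePolicy : PolicyBase {  // stand-in for SimpleCompPolicy
     private:
      // Formerly pure-virtual hooks on NonTieredCompPolicy, now ordinary
      // private members called from the policy's own event() handling.
      void method_invocation_event() {}
      void method_back_branch_event(int bci) { (void)bci; }
    };
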
@ -30,6 +30,7 @@
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@ -48,7 +49,6 @@
#include "prims/whitebox.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"

@ -188,14 +188,6 @@ void select_compilation_mode_ergonomically() {
#endif // TIERED

void CompilerConfig::set_tiered_flags() {
// With tiered, set default policy to SimpleThresholdPolicy, which is 2.
if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
}
if (CompilationPolicyChoice < 2) {
vm_exit_during_initialization(
"Incompatible compilation policy selected", NULL);
}
// Increase the code cache size - tiered compiles a lot more.
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_ERGO(ReservedCodeCacheSize,
@ -420,17 +412,6 @@ void CompilerConfig::ergo_initialize() {
if (TieredCompilation) {
set_tiered_flags();
} else {
int max_compilation_policy_choice = 1;
#ifdef COMPILER2
if (is_server_compilation_mode_vm()) {
max_compilation_policy_choice = 2;
}
#endif
// Check if the policy is valid.
if (CompilationPolicyChoice >= max_compilation_policy_choice) {
vm_exit_during_initialization(
"Incompatible compilation policy selected", NULL);
}
// Scale CompileThreshold
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {

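The scaling comment above is the whole contract: -XX:CompileThresholdScaling stretches or shrinks the interpreter's compile thresholds, so CompileThreshold = 10000 with a scaling of 0.5 compiles after 5000 invocation events, 2.0 after 20000, while 0.0 is treated like -Xint and leaves the threshold itself untouched. As a one-function sketch (hypothetical helper, not the VM's code):

    static long scaled_compile_threshold(long threshold, double scaling) {
      if (scaling == 0.0) return threshold;  // -Xint-like: interpret only, threshold untouched
      return (long)(threshold * scaling);    // e.g. 10000 * 0.5 -> 5000
    }
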
@ -66,8 +66,7 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
ZGC_ONLY(cflags(ZTraceLoadBarriers, bool, false, ZTraceLoadBarriers))
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
#else
#define compilerdirectives_c2_flags(cflags)
#endif

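compilerdirectives_c2_flags above is an X-macro: each cflags(name, type, default, compiler-flag) row is re-expanded by whatever definition of cflags the including site supplies, which is why the final row must not end with a line-continuation backslash -- the reason the MaxNodeLimit line is rewritten once the ZTraceLoadBarriers row is dropped. A minimal demo of the pattern with an invented two-row table:

    #include <cstdio>

    // Invented table in the style of compilerdirectives_c2_flags (three
    // columns instead of four, for brevity). Note: no backslash on the last row.
    #define demo_flags(cflags) \
      cflags(MaxNodeLimit, int, 80000) \
      cflags(CloneMapDebug, bool, false)

    // Expansion 1: declare one struct field per row.
    #define DECLARE_FIELD(name, type, dflt) type name;
    struct DemoDirectives { demo_flags(DECLARE_FIELD) };

    // Expansion 2: print each row's default.
    #define PRINT_DEFAULT(name, type, dflt) std::printf(#name " default: " #dflt "\n");
    int main() { demo_flags(PRINT_DEFAULT) }
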
Some files were not shown because too many files have changed in this diff.