8341692: Implement JEP 490: ZGC: Remove the Non-Generational Mode

Reviewed-by: ihse, eosterlund, stefank, prr, cjplummer, dholmes
Axel Boldt-Christmas 2024-10-30 11:05:07 +00:00
parent 0fe15d6836
commit 821c514a13
407 changed files with 425 additions and 39280 deletions

View File

@@ -853,11 +853,7 @@ define SetupRunJtregTestBody
endif
ifneq ($$(findstring -XX:+UseZGC, $$(JTREG_ALL_OPTIONS)), )
- ifneq ($$(findstring -XX:-ZGenerational, $$(JTREG_ALL_OPTIONS)), )
-   JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
- else
-   JTREG_AUTO_PROBLEM_LISTS += ProblemList-generational-zgc.txt
- endif
+ JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
endif
ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )

View File

@@ -193,8 +193,6 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ifeq ($(call check-jvm-feature, zgc), true)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
- $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU).ad \
- $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU_ARCH).ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \
)))

View File

@@ -150,7 +150,6 @@ endif
ifneq ($(call check-jvm-feature, zgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
JVM_EXCLUDE_PATTERNS += gc/z
- JVM_EXCLUDE_PATTERNS += gc/x
endif
ifneq ($(call check-jvm-feature, shenandoahgc), true)

View File

@@ -990,10 +990,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
- if (!(UseZGC && !ZGenerational)) {
-   // Load barrier has not yet been applied, so ZGC can't verify the oop here
-   __ verify_oop(dest->as_register());
- }
+ __ verify_oop(dest->as_register());
}
}

View File

@@ -1,462 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetRuntime.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/x/c1/xBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/x/c2/xBarrierSetC2.hpp"
#endif // COMPILER2
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#undef __
#define __ masm->
void XBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp2) {
if (!XBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
return;
}
assert_different_registers(rscratch1, rscratch2, src.base());
assert_different_registers(rscratch1, rscratch2, dst);
Label done;
// Load bad mask into scratch register.
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
__ lea(rscratch2, src);
__ ldr(dst, src);
// Test reference against bad mask. If mask bad, then we need to fix it up.
__ tst(dst, rscratch1);
__ br(Assembler::EQ, done);
__ enter(/*strip_ret_addr*/true);
__ push_call_clobbered_registers_except(RegSet::of(dst));
if (c_rarg0 != dst) {
__ mov(c_rarg0, dst);
}
__ mov(c_rarg1, rscratch2);
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
// Make sure dst has the return value.
if (dst != r0) {
__ mov(dst, r0);
}
__ pop_call_clobbered_registers_except(RegSet::of(dst));
__ leave();
__ bind(done);
}
#ifdef ASSERT
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2,
Register tmp3) {
// Verify value
if (is_reference_type(type)) {
// Note that src could be noreg, which means we
// are storing null and can skip verification.
if (val != noreg) {
Label done;
// tmp1, tmp2 and tmp3 are often set to noreg.
RegSet savedRegs = RegSet::of(rscratch1);
__ push(savedRegs, sp);
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
__ tst(val, rscratch1);
__ br(Assembler::EQ, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
__ pop(savedRegs, sp);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
}
#endif // ASSERT
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs) {
if (!is_oop) {
// Barrier not needed
return;
}
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
assert_different_registers(src, count, rscratch1);
__ push(saved_regs, sp);
if (count == c_rarg0) {
if (src == c_rarg1) {
// exactly backwards!!
__ mov(rscratch1, c_rarg0);
__ mov(c_rarg0, c_rarg1);
__ mov(c_rarg1, rscratch1);
} else {
__ mov(c_rarg1, count);
__ mov(c_rarg0, src);
}
} else {
__ mov(c_rarg0, src);
__ mov(c_rarg1, count);
}
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
__ pop(saved_regs, sp);
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
}
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
assert_different_registers(jni_env, robj, tmp);
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
// The Address offset is too large to direct load - -784. Our range is +127, -128.
__ mov(tmp, (int64_t)(in_bytes(XThreadLocalData::address_bad_mask_offset()) -
in_bytes(JavaThread::jni_environment_offset())));
// Load address bad mask
__ add(tmp, jni_env, tmp);
__ ldr(tmp, Address(tmp));
// Check address bad mask
__ tst(robj, tmp);
__ br(Assembler::NE, slowpath);
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
assert_different_registers(rscratch1, rthread, ref->as_register());
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
__ tst(ref->as_register(), rscratch1);
}
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
Register tmp = noreg;
if (stub->tmp()->is_valid()) {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = tmp = stub->tmp()->as_pointer_register();
} else {
// Address already in register
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save r0 unless it is the result or tmp register
// Set up SP to accommodate parameters and maybe r0..
if (ref != r0 && tmp != r0) {
__ sub(sp, sp, 32);
__ str(r0, Address(sp, 16));
} else {
__ sub(sp, sp, 16);
}
// Setup arguments and call runtime stub
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ far_call(stub->runtime_stub());
// Verify result
__ verify_oop(r0);
// Move result into place
if (ref != r0) {
__ mov(ref, r0);
}
// Restore r0 unless it is the result or tmp register
if (ref != r0 && tmp != r0) {
__ ldr(r0, Address(sp, 16));
__ add(sp, sp, 32);
} else {
__ add(sp, sp, 16);
}
// Stub exit
__ b(*stub->continuation());
}
#undef __
#define __ sasm->
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
__ prologue("zgc_load_barrier stub", false);
__ push_call_clobbered_registers_except(RegSet::of(r0));
// Setup arguments
__ load_parameter(0, c_rarg0);
__ load_parameter(1, c_rarg1);
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
__ pop_call_clobbered_registers_except(RegSet::of(r0));
__ epilogue();
}
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_FloatRegister()) {
return opto_reg & ~1;
}
return opto_reg;
}
#undef __
#define __ _masm->
class XSaveLiveRegisters {
private:
MacroAssembler* const _masm;
RegSet _gp_regs;
FloatRegSet _fp_regs;
PRegSet _p_regs;
public:
void initialize(XLoadBarrierStubC2* stub) {
// Record registers that needs to be saved/restored
RegMaskIterator rmi(stub->live());
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
if (OptoReg::is_reg(opto_reg)) {
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
_gp_regs += RegSet::of(vm_reg->as_Register());
} else if (vm_reg->is_FloatRegister()) {
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
} else if (vm_reg->is_PRegister()) {
_p_regs += PRegSet::of(vm_reg->as_PRegister());
} else {
fatal("Unknown register type");
}
}
}
// Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
_gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
}
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_gp_regs(),
_fp_regs(),
_p_regs() {
// Figure out what registers to save/restore
initialize(stub);
// Save registers
__ push(_gp_regs, sp);
__ push_fp(_fp_regs, sp);
__ push_p(_p_regs, sp);
}
~XSaveLiveRegisters() {
// Restore registers
__ pop_p(_p_regs, sp);
__ pop_fp(_fp_regs, sp);
// External runtime call may clobber ptrue reg
__ reinitialize_ptrue();
__ pop(_gp_regs, sp);
}
};
#undef __
#define __ _masm->
class XSetupArguments {
private:
MacroAssembler* const _masm;
const Register _ref;
const Address _ref_addr;
public:
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {
// Setup arguments
if (_ref_addr.base() == noreg) {
// No self healing
if (_ref != c_rarg0) {
__ mov(c_rarg0, _ref);
}
__ mov(c_rarg1, 0);
} else {
// Self healing
if (_ref == c_rarg0) {
// _ref is already at correct place
__ lea(c_rarg1, _ref_addr);
} else if (_ref != c_rarg1) {
// _ref is in wrong place, but not in c_rarg1, so fix it first
__ lea(c_rarg1, _ref_addr);
__ mov(c_rarg0, _ref);
} else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
__ mov(c_rarg0, _ref);
__ lea(c_rarg1, _ref_addr);
} else {
assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
__ mov(rscratch2, c_rarg1);
__ lea(c_rarg1, _ref_addr);
__ mov(c_rarg0, rscratch2);
} else {
ShouldNotReachHere();
}
}
}
}
~XSetupArguments() {
// Transfer result
if (_ref != r0) {
__ mov(_ref, r0);
}
}
};
#undef __
#define __ masm->
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
BLOCK_COMMENT("XLoadBarrierStubC2");
// Stub entry
__ bind(*stub->entry());
{
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);
__ mov(rscratch1, stub->slow_path());
__ blr(rscratch1);
}
// Stub exit
__ b(*stub->continuation());
}
#endif // COMPILER2
#undef __
#define __ masm->
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if mask is good.
// verifies that XAddressBadMask & r0 == 0
__ ldr(tmp2, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(tmp1, obj, tmp2);
__ cbnz(tmp1, error);
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __
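The removed XBarrierSetAssembler code above is the AArch64 back end of the single-generation ZGC load barrier: every reference load is tested against the thread-local "bad mask", and only a reference whose color bits intersect that mask is sent to the slow path, which remaps it and self-heals the field. The standalone C++ sketch below restates that fast-path/slow-path logic with purely illustrative bit positions and a stand-in slow path; it is an explanatory aid, not HotSpot code and not part of this commit.

#include <cstdint>
#include <cstdio>

using zpointer = uint64_t;

// Illustrative color bits (compare the xGlobals_aarch64.cpp layout comments
// removed later in this commit); the real positions depend on the heap size.
constexpr zpointer kMarked0  = zpointer(1) << 42;
constexpr zpointer kMarked1  = zpointer(1) << 43;
constexpr zpointer kRemapped = zpointer(1) << 44;

// The bad mask flips between GC phases; pretend Marked0 is currently good.
zpointer address_bad_mask = kMarked1 | kRemapped;

// Stand-in for XBarrierSetRuntime::load_barrier_on_oop_field_preloaded():
// strip the stale color and apply the currently good one.
zpointer relocate_or_remap(zpointer ref) {
  return (ref & ~(kMarked0 | kMarked1 | kRemapped)) | kMarked0;
}

// The logic load_at() emits inline above:
//   ldr dst, [src]; tst dst, bad_mask; br.eq done; call slow path; self-heal.
zpointer load_barrier(zpointer* field) {
  zpointer ref = *field;
  if ((ref & address_bad_mask) != 0) {
    ref = relocate_or_remap(ref);
    *field = ref;  // self-heal so later loads of this field take the fast path
  }
  return ref;
}

int main() {
  zpointer field = zpointer(0x1000) | kRemapped;  // stale color from an earlier cycle
  printf("healed ref: 0x%llx\n", (unsigned long long)load_barrier(&field));
  return 0;
}

Self-healing is what keeps the inline fast path down to a single test-and-branch: once a field has been healed, subsequent loads of it never take the slow path again.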

View File

@@ -1,110 +0,0 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2
#ifdef COMPILER1
class LIR_Assembler;
class LIR_Opr;
class StubAssembler;
#endif // COMPILER1
#ifdef COMPILER2
class Node;
#endif // COMPILER2
#ifdef COMPILER1
class XLoadBarrierStubC1;
#endif // COMPILER1
#ifdef COMPILER2
class XLoadBarrierStubC2;
#endif // COMPILER2
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
public:
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp2);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2,
Register tmp3);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath);
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
XLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP

View File

@@ -1,212 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef LINUX
#include <sys/mman.h>
#endif // LINUX
//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
// . .
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 6 5 2 1 0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// | | |
// | | * 41-0 Object Offset (42-bits, 4TB address space)
// | |
// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | 0010 = Marked1 (Address view 8-12TB)
// | 0100 = Remapped (Address view 16-20TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// . .
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | Marked0 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 7 6 3 2 0
// +------------------+-----+------------------------------------------------+
// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
// +-------------------+----+------------------------------------------------+
// | | |
// | | * 42-0 Object Offset (43-bits, 8TB address space)
// | |
// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
// | 0010 = Marked1 (Address view 16-24TB)
// | 0100 = Remapped (Address view 32-40TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
// . .
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | Marked0 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 8 7 4 3 0
// +------------------+----+-------------------------------------------------+
// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
// +------------------+----+-------------------------------------------------+
// | | |
// | | * 43-0 Object Offset (44-bits, 16TB address space)
// | |
// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
// | 0010 = Marked1 (Address view 32-48TB)
// | 0100 = Remapped (Address view 64-80TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-48 Fixed (16-bits, always zero)
//
// Default value if probing is not implemented for a certain platform
// Max address bit is restricted by implicit assumptions in the code, for instance
// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack
static const size_t DEFAULT_MAX_ADDRESS_BIT = 46;
// Minimum value returned, if probing fails
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
static size_t probe_valid_max_address_bit() {
#ifdef LINUX
size_t max_address_bit = 0;
const size_t page_size = os::vm_page_size();
for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
const uintptr_t base_addr = ((uintptr_t) 1U) << i;
if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
// msync succeeded, the address is valid, and maybe even already mapped.
max_address_bit = i;
break;
}
if (errno != ENOMEM) {
// Some error occurred. This should never happen, but msync
// has some undefined behavior, hence ignore this bit.
#ifdef ASSERT
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#else // ASSERT
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#endif // ASSERT
continue;
}
// Since msync failed with ENOMEM, the page might not be mapped.
// Try to map it, to see if the address is valid.
void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (result_addr != MAP_FAILED) {
munmap(result_addr, page_size);
}
if ((uintptr_t) result_addr == base_addr) {
// address is valid
max_address_bit = i;
break;
}
}
if (max_address_bit == 0) {
// probing failed, allocate a very high page and take that bit as the maximum
const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (result_addr != MAP_FAILED) {
max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
munmap(result_addr, page_size);
}
}
log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
#else // LINUX
return DEFAULT_MAX_ADDRESS_BIT;
#endif // LINUX
}
size_t XPlatformAddressOffsetBits() {
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
const size_t min_address_offset_bits = max_address_offset_bits - 2;
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t XPlatformAddressMetadataShift() {
return XPlatformAddressOffsetBits();
}
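The layout comments in the removed xGlobals_aarch64.cpp above describe how a single-generation ZGC pointer packs an object offset and four metadata (color) bits, with XPlatformAddressOffsetBits() picking one of the three layouts from the max heap size. The short sketch below decodes a pointer under Layout 1 (42 offset bits, metadata in bits 45-42); the constants and the example value are illustrative only, and this is not HotSpot code.

#include <cstdint>
#include <cstdio>

constexpr int      kOffsetBits   = 42;                              // Layout 1: 4TB address space
constexpr uint64_t kOffsetMask   = (uint64_t(1) << kOffsetBits) - 1;
constexpr uint64_t kMetadataMask = uint64_t(0xF) << kOffsetBits;    // bits 45-42

struct DecodedPointer {
  uint64_t offset;    // bits 41-0
  uint64_t metadata;  // 0001 = Marked0, 0010 = Marked1, 0100 = Remapped, 1000 = Finalizable
};

DecodedPointer decode(uint64_t colored) {
  return { colored & kOffsetMask, (colored & kMetadataMask) >> kOffsetBits };
}

int main() {
  // Offset 0x123456789 seen through the Marked0 view, i.e. an address in the
  // 4TB-8TB range of the Layout 1 diagram above.
  uint64_t marked0 = (uint64_t(1) << kOffsetBits) | 0x123456789ULL;
  DecodedPointer d = decode(marked0);
  printf("offset=0x%llx metadata=0x%llx\n",
         (unsigned long long)d.offset, (unsigned long long)d.metadata);
  return 0;
}

Under the other two layouts only the offset width changes (43 or 44 bits), which shifts the metadata bits and the view boundaries accordingly.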

View File

@@ -1,33 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
#define CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
const size_t XPlatformHeapViews = 3;
const size_t XPlatformCacheLineSize = 64;
size_t XPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift();
#endif // CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
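XPlatformHeapViews = 3 in the removed header above reflects that single-generation ZGC multi-mapped the heap: the same physical memory appeared at the Marked0, Marked1 and Remapped address views, so a colored pointer could be dereferenced directly through whichever view its metadata bits select. Generational ZGC instead masks the color bits off in its barriers and does not rely on multi-mapping, which is part of why these platform files can be removed. The Linux-only sketch below (it uses memfd_create, so it assumes glibc 2.27+; it is not HotSpot code) shows the multi-mapping idea in isolation; real ZGC mapped at the fixed view addresses shown in the diagrams, while this sketch lets the kernel pick the addresses.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  const long page = sysconf(_SC_PAGESIZE);
  // Shared anonymous memory standing in for one page of the physical heap.
  int fd = memfd_create("fake_zpage", 0);
  if (fd < 0 || ftruncate(fd, page) != 0) { perror("memfd"); return 1; }

  // Map the same physical page at three different virtual addresses ("views").
  void* marked0  = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  void* marked1  = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  void* remapped = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (marked0 == MAP_FAILED || marked1 == MAP_FAILED || remapped == MAP_FAILED) {
    perror("mmap");
    return 1;
  }

  // A store through one view is visible through the others: the three views
  // are just different addresses for the same bytes.
  strcpy(static_cast<char*>(marked0), "same physical memory");
  printf("marked1:  %s\nremapped: %s\n",
         static_cast<char*>(marked1), static_cast<char*>(remapped));
  return 0;
}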

View File

@@ -1,249 +0,0 @@
//
// Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{
#include "gc/shared/gc_globals.hpp"
#include "gc/x/c2/xBarrierSetC2.hpp"
#include "gc/x/xThreadLocalData.hpp"
%}
source %{
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
__ ldr(tmp, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(tmp, tmp, ref);
__ cbnz(tmp, *stub->entry());
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
}
%}
// Load Pointer
instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0));
effect(TEMP dst, KILL cr);
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $mem" %}
ins_encode %{
Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
if (ref_addr.getMode() == Address::base_plus_offset) {
// Fix up any out-of-range offsets.
assert_different_registers(rscratch1, as_Register($mem$$base));
assert_different_registers(rscratch1, $dst$$Register);
ref_addr = __ legitimize_address(ref_addr, 8, rscratch1);
}
__ ldr($dst$$Register, ref_addr);
x_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
%}
// Load Pointer Volatile
instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
predicate(UseZGC && !ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
ins_cost(VOLATILE_REF_COST);
format %{ "ldar $dst, $mem\t" %}
ins_encode %{
__ ldar($dst$$Register, $mem$$Register);
x_load_barrier(masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(KILL cr, TEMP_DEF res);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $mem, $oldval, $newval\n\t"
"cset $res, EQ" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
effect(KILL cr, TEMP_DEF res);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $mem, $oldval, $newval\n\t"
"cset $res, EQ" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0));
effect(TEMP_DEF prev, KILL cr);
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}

View File

@@ -104,7 +104,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
ins_cost(4 * INSN_COST);
@@ -130,7 +130,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
- predicate(UseZGC && ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
ins_cost(VOLATILE_REF_COST);
@@ -149,7 +149,7 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
// Store Pointer
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@@ -166,7 +166,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
// Store Pointer Volatile
instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
- predicate(UseZGC && ZGenerational && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
+ predicate(UseZGC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@@ -183,7 +183,7 @@ instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -207,7 +207,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -231,7 +231,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -254,7 +254,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -277,7 +277,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP prev, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@@ -295,7 +295,7 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP prev, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);

View File

@@ -1,585 +0,0 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/register.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetRuntime.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/x/c1/xBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/x/c2/xBarrierSetC2.hpp"
#endif // COMPILER2
#undef __
#define __ masm->
void XBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
__ block_comment("load_at (zgc) {");
// Check whether a special gc barrier is required for this particular load
// (e.g. whether it's a reference load or not)
if (!XBarrierSet::barrier_needed(decorators, type)) {
BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
tmp1, tmp2, preservation_level, L_handle_null);
return;
}
if (ind_or_offs.is_register()) {
assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
} else {
assert_different_registers(base, tmp1, tmp2, R0, noreg);
assert_different_registers(dst, tmp1, tmp2, R0, noreg);
}
/* ==== Load the pointer using the standard implementation for the actual heap access
and the decompression of compressed pointers ==== */
// Result of 'load_at' (standard implementation) will be written back to 'dst'.
// As 'base' is required for the C-call, it must be reserved in case of a register clash.
Register saved_base = base;
if (base == dst) {
__ mr(tmp2, base);
saved_base = tmp2;
}
BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
tmp1, noreg, preservation_level, L_handle_null);
/* ==== Check whether pointer is dirty ==== */
Label skip_barrier;
// Load bad mask into scratch register.
__ ld(tmp1, (intptr_t) XThreadLocalData::address_bad_mask_offset(), R16_thread);
// The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits.
// A pointer is classified as dirty if any of the color bits that also match the bad mask is set.
// Conversely, it follows that the logical AND of the bad mask and the pointer must be zero
// if the pointer is not dirty.
// Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
__ and_(tmp1, tmp1, dst);
__ beq(CCR0, skip_barrier);
/* ==== Invoke barrier ==== */
int nbytes_save = 0;
const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
const bool preserve_R3 = dst != R3_ARG1;
if (needs_frame) {
if (preserve_gp_registers) {
nbytes_save = (preserve_fp_registers
? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
: MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
}
__ save_LR(tmp1);
__ push_frame_reg_args(nbytes_save, tmp1);
}
// Setup arguments
if (saved_base != R3_ARG1) {
__ mr_if_needed(R3_ARG1, dst);
__ add(R4_ARG2, ind_or_offs, saved_base);
} else if (dst != R4_ARG2) {
__ add(R4_ARG2, ind_or_offs, saved_base);
__ mr(R3_ARG1, dst);
} else {
__ add(R0, ind_or_offs, saved_base);
__ mr(R3_ARG1, dst);
__ mr(R4_ARG2, R0);
}
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
Register result = R3_RET;
if (needs_frame) {
__ pop_frame();
__ restore_LR(tmp1);
if (preserve_R3) {
__ mr(R0, R3_RET);
result = R0;
}
if (preserve_gp_registers) {
__ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
}
}
__ mr_if_needed(dst, result);
__ bind(skip_barrier);
__ block_comment("} load_at (zgc)");
}
#ifdef ASSERT
// The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure.
void XBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level) {
__ block_comment("store_at (zgc) {");
// If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
if (is_reference_type(type) && val != noreg) {
__ ld(tmp1, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
__ and_(tmp1, tmp1, val);
__ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);
__ block_comment("} store_at (zgc)");
}
#endif // ASSERT
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
Register src, Register dst, Register count,
Register preserve1, Register preserve2) {
__ block_comment("arraycopy_prologue (zgc) {");
/* ==== Check whether a special gc barrier is required for this particular load ==== */
if (!is_reference_type(component_type)) {
return;
}
Label skip_barrier;
// Fast path: Array is of length zero
__ cmpdi(CCR0, count, 0);
__ beq(CCR0, skip_barrier);
/* ==== Ensure register sanity ==== */
Register tmp_R11 = R11_scratch1;
assert_different_registers(src, dst, count, tmp_R11, noreg);
if (preserve1 != noreg) {
// Not technically required, but unlikely being intended.
assert_different_registers(preserve1, preserve2);
}
/* ==== Invoke barrier (slowpath) ==== */
int nbytes_save = 0;
{
assert(!noreg->is_volatile(), "sanity");
if (preserve1->is_volatile()) {
__ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
}
if (preserve2->is_volatile() && preserve1 != preserve2) {
__ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
}
__ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
__ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
__ std(count, -BytesPerWord * ++nbytes_save, R1_SP);
__ save_LR(tmp_R11);
__ push_frame_reg_args(nbytes_save, tmp_R11);
}
// XBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
if (count == R3_ARG1) {
if (src == R4_ARG2) {
// Arguments are provided in reverse order
__ mr(tmp_R11, count);
__ mr(R3_ARG1, src);
__ mr(R4_ARG2, tmp_R11);
} else {
__ mr(R4_ARG2, count);
__ mr(R3_ARG1, src);
}
} else {
__ mr_if_needed(R3_ARG1, src);
__ mr_if_needed(R4_ARG2, count);
}
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr());
__ pop_frame();
__ restore_LR(tmp_R11);
{
__ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
__ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
__ ld(src, -BytesPerWord * nbytes_save--, R1_SP);
if (preserve2->is_volatile() && preserve1 != preserve2) {
__ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
}
if (preserve1->is_volatile()) {
__ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
}
}
__ bind(skip_barrier);
__ block_comment("} arraycopy_prologue (zgc)");
}
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
__ block_comment("try_resolve_jobject_in_native (zgc) {");
assert_different_registers(jni_env, obj, tmp);
// Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
// Check whether pointer is dirty.
__ ld(tmp,
in_bytes(XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
jni_env);
__ and_(tmp, obj, tmp);
__ bne(CCR0, slowpath);
__ block_comment("} try_resolve_jobject_in_native (zgc)");
}
#undef __
#ifdef COMPILER1
#define __ ce->masm()->
// Code emitted by LIR node "LIR_OpXLoadBarrierTest" which in turn is emitted by XBarrierSetC1::load_barrier.
// The actual compare and branch instructions are represented as stand-alone LIR nodes.
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
__ block_comment("load_barrier_test (zgc) {");
__ ld(R0, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
__ andr(R0, R0, ref->as_pointer_register());
__ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);
__ block_comment("} load_barrier_test (zgc)");
}
// Code emitted by code stub "XLoadBarrierStubC1" which in turn is emitted by XBarrierSetC1::load_barrier.
// Invokes the runtime stub which is defined just below.
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const {
__ block_comment("c1_load_barrier_stub (zgc) {");
__ bind(*stub->entry());
/* ==== Determine relevant data registers and ensure register sanity ==== */
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
// Determine reference address
if (stub->tmp()->is_valid()) {
// 'tmp' register is given, so address might have an index or a displacement.
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = stub->tmp()->as_pointer_register();
} else {
// 'tmp' register is not given, so address must have neither an index nor a displacement.
// The address' base register is thus usable as-is.
assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement");
assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index");
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, R0, noreg);
/* ==== Invoke stub ==== */
// Pass arguments via stack. The stack pointer will be bumped by the stub.
__ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
__ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);
__ load_const_optimized(R0, stub->runtime_stub());
__ call_stub(R0);
// The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
__ mr_if_needed(ref, R0);
__ b(*stub->continuation());
__ block_comment("} c1_load_barrier_stub (zgc)");
}
#undef __
#define __ sasm->
// Code emitted by runtime code stub which in turn is emitted by XBarrierSetC1::generate_c1_runtime_stubs.
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
__ block_comment("c1_load_barrier_runtime_stub (zgc) {");
const int stack_parameters = 2;
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save);
__ save_LR(R0);
// Load arguments back again from the stack.
__ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
__ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr
__ push_frame_reg_args(nbytes_save, R0);
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
__ verify_oop(R3_RET, "Bad pointer after barrier invocation");
__ mr(R0, R3_RET);
__ pop_frame();
__ restore_LR(R3_RET);
__ restore_volatile_gprs(R1_SP, -nbytes_save);
__ blr();
__ block_comment("} c1_load_barrier_runtime_stub (zgc)");
}
#undef __
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if ((vm_reg->is_Register() || vm_reg ->is_FloatRegister()) && (opto_reg & 1) != 0) {
return OptoReg::Bad;
}
return opto_reg;
}
#define __ _masm->
class XSaveLiveRegisters {
MacroAssembler* _masm;
RegMask _reg_mask;
Register _result_reg;
int _frame_size;
public:
XSaveLiveRegisters(MacroAssembler *masm, XLoadBarrierStubC2 *stub)
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {
const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
_frame_size = align_up(register_save_size, frame::alignment_in_bytes)
+ frame::native_abi_reg_args_size;
__ save_LR_CR(R0);
__ push_frame(_frame_size, R0);
iterate_over_register_mask(ACTION_SAVE, _frame_size);
}
~XSaveLiveRegisters() {
iterate_over_register_mask(ACTION_RESTORE, _frame_size);
__ addi(R1_SP, R1_SP, _frame_size);
__ restore_LR_CR(R0);
}
private:
enum IterationAction : int {
ACTION_SAVE,
ACTION_RESTORE,
ACTION_COUNT_ONLY
};
int iterate_over_register_mask(IterationAction action, int offset = 0) {
int reg_save_index = 0;
RegMaskIterator live_regs_iterator(_reg_mask);
while(live_regs_iterator.has_next()) {
const OptoReg::Name opto_reg = live_regs_iterator.next();
// Filter out stack slots (spilled registers, i.e., stack-allocated registers).
if (!OptoReg::is_reg(opto_reg)) {
continue;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
Register std_reg = vm_reg->as_Register();
// '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
if (std_reg == _result_reg) {
continue;
}
if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
reg_save_index++;
if (action == ACTION_SAVE) {
_masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else if (action == ACTION_RESTORE) {
_masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else if (vm_reg->is_FloatRegister()) {
FloatRegister fp_reg = vm_reg->as_FloatRegister();
if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
reg_save_index++;
if (action == ACTION_SAVE) {
_masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else if (action == ACTION_RESTORE) {
_masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else if (vm_reg->is_ConditionRegister()) {
// NOP. Conditions registers are covered by save_LR_CR
} else if (vm_reg->is_VectorSRegister()) {
assert(SuperwordUseVSX, "or should not reach here");
VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
reg_save_index += 2;
Register spill_addr = R0;
if (action == ACTION_SAVE) {
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
_masm->stxvd2x(vs_reg, spill_addr);
} else if (action == ACTION_RESTORE) {
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
_masm->lxvd2x(vs_reg, spill_addr);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else {
if (vm_reg->is_SpecialRegister()) {
fatal("Special registers are unsupported. Found register %s", vm_reg->name());
} else {
fatal("Register type is not known");
}
}
}
return reg_save_index;
}
};
#undef __
#define __ _masm->
class XSetupArguments {
MacroAssembler* const _masm;
const Register _ref;
const Address _ref_addr;
public:
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {
// Desired register/argument configuration:
// _ref: R3_ARG1
// _ref_addr: R4_ARG2
// '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference.
if (_ref_addr.base() == noreg) {
assert_different_registers(_ref, R0, noreg);
__ mr_if_needed(R3_ARG1, _ref);
__ li(R4_ARG2, 0);
} else {
assert_different_registers(_ref, _ref_addr.base(), R0, noreg);
assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component");
if (_ref != R4_ARG2) {
// Calculate address first as the address' base register might clash with R4_ARG2
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
__ mr_if_needed(R3_ARG1, _ref);
} else if (_ref_addr.base() != R3_ARG1) {
__ mr(R3_ARG1, _ref);
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); // Clobbering _ref
} else {
// Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1)
__ mr(R0, _ref);
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
__ mr(R3_ARG1, R0);
}
}
}
};
#undef __
#define __ masm->
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
__ block_comment("generate_c2_load_barrier_stub (zgc) {");
__ bind(*stub->entry());
Register ref = stub->ref();
Address ref_addr = stub->ref_addr();
assert_different_registers(ref, ref_addr.base());
{
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);
__ call_VM_leaf(stub->slow_path());
__ mr_if_needed(ref, R3_RET);
}
__ b(*stub->continuation());
__ block_comment("} generate_c2_load_barrier_stub (zgc)");
}
#undef __
#endif // COMPILER2

View File

@ -1,93 +0,0 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2
#ifdef COMPILER1
class LIR_Assembler;
class LIR_Opr;
class StubAssembler;
#endif // COMPILER1
#ifdef COMPILER2
class Node;
#endif // COMPILER2
#ifdef COMPILER1
class XLoadBarrierStubC1;
#endif // COMPILER1
#ifdef COMPILER2
class XLoadBarrierStubC2;
#endif // COMPILER2
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count,
Register preserve1, Register preserve2);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
void generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const;
#endif // COMPILER2
};
#endif // CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP

View File

@ -1,203 +0,0 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include <cstddef>
#ifdef LINUX
#include <sys/mman.h>
#endif // LINUX
//
// The overall memory layouts across different power platforms are similar and only differ with regard to
// the position of the highest addressable bit; the position of the metadata bits and the size of the actual
// addressable heap address space are adjusted accordingly.
//
// The following memory diagram shows an example layout in which bit '45' is the highest addressable bit.
// It is assumed that this virtual memory address space layout is predominant on the power platform.
//
// Standard Address Space & Pointer Layout
// ---------------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127 TiB - 1)
// . .
// . .
// . .
// +--------------------------------+ 0x0000140000000000 (20 TiB)
// | Remapped View |
// +--------------------------------+ 0x0000100000000000 (16 TiB)
// . .
// +--------------------------------+ 0x00000c0000000000 (12 TiB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8 TiB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4 TiB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 6 5 2 1 0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// | | |
// | | * 41-0 Object Offset (42-bits, 4TB address space)
// | |
// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | 0010 = Marked1 (Address view 8-12TB)
// | 0100 = Remapped (Address view 16-20TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
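// Editorial sketch (not part of the original change): how the metadata masks shown in the
// diagram above would be derived for the assumed 42-bit object offset. The shift equals the
// number of object offset bits, i.e. XPlatformAddressMetadataShift(); all 'x_example_*'
// names are illustrative only.
static const int       x_example_offset_bits      = 42;                                           // assumed layout
static const uintptr_t x_example_marked0_mask     = (uintptr_t)1 << (x_example_offset_bits + 0);  // bit 42 (0001)
static const uintptr_t x_example_marked1_mask     = (uintptr_t)1 << (x_example_offset_bits + 1);  // bit 43 (0010)
static const uintptr_t x_example_remapped_mask    = (uintptr_t)1 << (x_example_offset_bits + 2);  // bit 44 (0100)
static const uintptr_t x_example_finalizable_mask = (uintptr_t)1 << (x_example_offset_bits + 3);  // bit 45 (1000)
// The untagged object offset under this layout is ptr & (x_example_marked0_mask - 1).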
// Maximum value as per spec (Power ISA v2.07): 2 ^ 60 bytes, i.e. 1 EiB (exbibyte)
static const unsigned int MAXIMUM_MAX_ADDRESS_BIT = 60;
// Most modern power processors provide an address space with no more than 45 addressable bits,
// that is, an address space of 32 TiB in size.
static const unsigned int DEFAULT_MAX_ADDRESS_BIT = 45;
// Minimum value returned, if probing fails: 64 GiB
static const unsigned int MINIMUM_MAX_ADDRESS_BIT = 36;
// Determines the highest addressable bit of the virtual address space (depends on platform)
// by trying to interact with memory in that address range,
// i.e. by syncing existing mappings (msync) or by temporarily mapping the memory area (mmap).
// If one of those operations succeeds, it is proven that the targeted memory area is within the virtual address space.
//
// To reduce the number of required system calls to a bare minimum, the DEFAULT_MAX_ADDRESS_BIT is intentionally set
// lower than what the ABI would theoretically permit.
// Such an avoidance strategy, however, might impose unnecessary restrictions on processors whose
// address space exceeds this default. If DEFAULT_MAX_ADDRESS_BIT is addressable, the next higher bit
// will be tested as well to ensure that this assumption does not artificially restrict the available memory.
static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) {
assert(init_bit >= min_bit, "Sanity");
assert(init_bit <= MAXIMUM_MAX_ADDRESS_BIT, "Test bit is outside the assumed address space range");
#ifdef LINUX
unsigned int max_valid_address_bit = 0;
void* last_allocatable_address = nullptr;
const size_t page_size = os::vm_page_size();
for (size_t i = init_bit; i >= min_bit; --i) {
void* base_addr = (void*) (((unsigned long) 1U) << i);
/* ==== Try msync-ing already mapped memory page ==== */
if (msync(base_addr, page_size, MS_ASYNC) == 0) {
// The page of the given address was synced by the linux kernel and must thus be both mapped and valid.
max_valid_address_bit = i;
break;
}
if (errno != ENOMEM) {
// An unexpected error occurred, i.e. an error that does not indicate that the targeted memory page is
// unmapped, but points to another type of issue.
// Even though this should never happen, such issues may come up due to undefined behavior.
#ifdef ASSERT
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#else // ASSERT
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#endif // ASSERT
continue;
}
/* ==== Try mapping memory page on our own ==== */
last_allocatable_address = mmap(base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (last_allocatable_address != MAP_FAILED) {
munmap(last_allocatable_address, page_size);
}
if (last_allocatable_address == base_addr) {
// As the linux kernel mapped exactly the page we have requested, the address must be valid.
max_valid_address_bit = i;
break;
}
log_info_p(gc, init)("Probe failed for bit '%zu'", i);
}
if (max_valid_address_bit == 0) {
// Probing did not bring up any usable address bit.
// As an alternative, the VM evaluates the address returned by mmap as it is expected that the reserved page
// will be close to the probed address that was out-of-range.
// As per mmap(2), "the kernel [will take] [the address] as a hint about where to
// place the mapping; on Linux, the mapping will be created at a nearby page boundary".
// It should thus be a "close enough" approximation to the real virtual memory address space limit.
//
// This recovery strategy is only applied in production builds.
// In debug builds, an assertion in 'XPlatformAddressOffsetBits' will bail out the VM to indicate that
// the assumed address space is no longer up-to-date.
if (last_allocatable_address != MAP_FAILED) {
const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1;
log_info_p(gc, init)("Did not find any valid addresses within the range, using address '%u' instead", bitpos);
return bitpos;
}
#ifdef ASSERT
fatal("Available address space can not be determined");
#else // ASSERT
log_warning_p(gc)("Cannot determine available address space. Falling back to default value.");
return DEFAULT_MAX_ADDRESS_BIT;
#endif // ASSERT
} else {
if (max_valid_address_bit == init_bit) {
// A usable address bit has been found immediately.
// To ensure that the entire virtual address space is exploited, the next higher bit will be tested as well.
log_info_p(gc, init)("Hit valid address '%u' on first try, retrying with next higher bit", max_valid_address_bit);
return MAX2(max_valid_address_bit, probe_valid_max_address_bit(init_bit + 1, init_bit + 1));
}
}
log_info_p(gc, init)("Found valid address '%u'", max_valid_address_bit);
return max_valid_address_bit;
#else // LINUX
return DEFAULT_MAX_ADDRESS_BIT;
#endif // LINUX
}
size_t XPlatformAddressOffsetBits() {
const static unsigned int valid_max_address_offset_bits =
probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1;
assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT,
"Highest addressable bit is outside the assumed address space range");
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
const size_t min_address_offset_bits = max_address_offset_bits - 2;
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
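// Editorial sketch (not part of the original change): a worked example of the clamping above,
// assuming the probe hit bit 45 (so valid_max_address_offset_bits == 46), a 32 GiB MaxHeapSize
// and a virtual-to-physical ratio of 16. The 39-bit request is clamped up to the 41-bit minimum.
static size_t x_example_address_offset_bits() {
  const size_t valid_max_address_offset_bits = 46;                          // assumed probe result
  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; // 43
  const size_t min_address_offset_bits = max_address_offset_bits - 2;       // 41
  const size_t heap_bytes = (size_t)32 << 30;                               // assumed MaxHeapSize, 32 GiB
  const size_t address_offset = round_up_power_of_2(heap_bytes * 16);       // 512 GiB == 2^39
  const size_t address_offset_bits = log2i_exact(address_offset);           // 39
  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); // 41
}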
size_t XPlatformAddressMetadataShift() {
return XPlatformAddressOffsetBits();
}

View File

@ -1,36 +0,0 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_PPC_GC_X_XGLOBALS_PPC_HPP
#define CPU_PPC_GC_X_XGLOBALS_PPC_HPP
#include "globalDefinitions_ppc.hpp"
const size_t XPlatformHeapViews = 3;
const size_t XPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE;
size_t XPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift();
#endif // CPU_PPC_GC_X_XGLOBALS_PPC_HPP

View File

@ -1,298 +0,0 @@
//
// Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2021 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{
#include "gc/shared/gc_globals.hpp"
#include "gc/x/c2/xBarrierSetC2.hpp"
#include "gc/x/xThreadLocalData.hpp"
%}
source %{
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
__ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
__ and_(tmp, tmp, ref);
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
}
static void x_compare_and_swap(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp_xchg, Register tmp_mask,
bool weak, bool acquire) {
// z-specific load barrier requires strong CAS operations.
// Weak CAS operations are thus only emitted if the barrier is elided.
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
weak && node->barrier_data() == XLoadBarrierElided);
if (node->barrier_data() != XLoadBarrierElided) {
Label skip_barrier;
__ ld(tmp_mask, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
__ and_(tmp_mask, tmp_mask, tmp_xchg);
__ beq(CCR0, skip_barrier);
// CAS must have failed because pointer in memory is bad.
x_load_barrier_slow_path(masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak);
__ bind(skip_barrier);
}
if (acquire) {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
__ isync();
} else {
__ sync();
}
}
}
static void x_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval, Register tmp,
bool weak, bool acquire) {
// z-specific load barrier requires strong CAS operations.
// Weak CAS operations are thus only emitted if the barrier is elided.
__ cmpxchgd(CCR0, res, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
weak && node->barrier_data() == XLoadBarrierElided);
if (node->barrier_data() != XLoadBarrierElided) {
Label skip_barrier;
__ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
__ and_(tmp, tmp, res);
__ beq(CCR0, skip_barrier);
x_load_barrier_slow_path(masm, node, Address(mem), res, tmp);
__ cmpxchgd(CCR0, res, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak);
__ bind(skip_barrier);
}
if (acquire) {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
__ isync();
} else {
__ sync();
}
}
}
%}
instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
ins_cost(MEMORY_REF_COST);
predicate((UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0)
&& (n->as_Load()->is_unordered() || followed_by_acquire(n)));
format %{ "LD $dst, $mem" %}
ins_encode %{
assert($mem$$index == 0, "sanity");
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
%}
ins_pipe(pipe_class_default);
%}
// Load Pointer Volatile
instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
ins_cost(3 * MEMORY_REF_COST);
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
format %{ "LD acq $dst, $mem" %}
ins_encode %{
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
__ isync();
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, false /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, true /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst);
format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, false /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, true /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp, flagsRegCR0 cr0) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& (
((CompareAndSwapNode*)n)->order() != MemNode::acquire
&& ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
));
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, false /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
iRegPdst tmp, flagsRegCR0 cr0) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
&& (
((CompareAndSwapNode*)n)->order() == MemNode::acquire
|| ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
));
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, true /* acquire */);
%}
ins_pipe(pipe_class_default);
%}
instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{
match(Set res (GetAndSetP mem newval));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
x_load_barrier(masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ isync();
} else {
__ sync();
}
%}
ins_pipe(pipe_class_default);
%}

View File

@ -143,7 +143,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
effect(TEMP_DEF dst, KILL cr0);
ins_cost(MEMORY_REF_COST);
predicate((UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0)
predicate((UseZGC && n->as_Load()->barrier_data() != 0)
&& (n->as_Load()->is_unordered() || followed_by_acquire(n)));
format %{ "LD $dst, $mem" %}
@ -163,7 +163,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
ins_cost(3 * MEMORY_REF_COST);
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
format %{ "LD acq $dst, $mem" %}
ins_encode %{
@ -181,7 +181,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
// Store Pointer
instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
%{
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr0);
ins_cost(2 * MEMORY_REF_COST);
@ -195,7 +195,7 @@ instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0)
%{
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem zero));
effect(TEMP tmp, KILL cr0);
ins_cost(MEMORY_REF_COST);
@ -213,7 +213,7 @@ instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
@ -232,7 +232,7 @@ instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
@ -250,7 +250,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (
((CompareAndSwapNode*)n)->order() != MemNode::acquire
&& ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
@ -270,7 +270,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0)
&& (
((CompareAndSwapNode*)n)->order() == MemNode::acquire
|| ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
@ -289,7 +289,7 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
match(Set res (GetAndSetP mem newval));
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{

View File

@ -49,7 +49,6 @@
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_ZGC
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#endif
@ -1976,7 +1975,7 @@ class StubGenerator: public StubCodeGenerator {
generate_conjoint_int_copy_core(aligned);
} else {
#if INCLUDE_ZGC
if (UseZGC && ZGenerational) {
if (UseZGC) {
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
zbs->generate_conjoint_oop_copy(_masm, dest_uninitialized);
} else
@ -2019,7 +2018,7 @@ class StubGenerator: public StubCodeGenerator {
generate_disjoint_int_copy_core(aligned);
} else {
#if INCLUDE_ZGC
if (UseZGC && ZGenerational) {
if (UseZGC) {
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
zbs->generate_disjoint_oop_copy(_masm, dest_uninitialized);
} else
@ -2137,7 +2136,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
__ bind(store_null);
#if INCLUDE_ZGC
if (UseZGC && ZGenerational) {
if (UseZGC) {
__ store_heap_oop(R10_oop, R8_offset, R4_to, R11_scratch1, R12_tmp, noreg,
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
dest_uninitialized ? IS_DEST_UNINITIALIZED : 0);
@ -2153,7 +2152,7 @@ class StubGenerator: public StubCodeGenerator {
// ======== loop entry is here ========
__ bind(load_element);
#if INCLUDE_ZGC
if (UseZGC && ZGenerational) {
if (UseZGC) {
__ load_heap_oop(R10_oop, R8_offset, R3_from,
R11_scratch1, R12_tmp,
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,

View File

@ -838,10 +838,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
if (!(UseZGC && !ZGenerational)) {
// Load barrier has not yet been applied, so ZGC can't verify the oop here
__ verify_oop(dest->as_register());
}
__ verify_oop(dest->as_register());
}
}

View File

@ -1,454 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetRuntime.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/x/c1/xBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/x/c2/xBarrierSetC2.hpp"
#endif // COMPILER2
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#undef __
#define __ masm->
void XBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp2) {
if (!XBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
return;
}
assert_different_registers(t1, src.base());
assert_different_registers(t0, t1, dst);
Label done;
// Load bad mask into temp register.
__ la(t0, src);
__ ld(t1, address_bad_mask_from_thread(xthread));
__ ld(dst, Address(t0));
// Test reference against bad mask. If mask bad, then we need to fix it up.
__ andr(t1, dst, t1);
__ beqz(t1, done);
__ enter();
__ push_call_clobbered_registers_except(RegSet::of(dst));
if (c_rarg0 != dst) {
__ mv(c_rarg0, dst);
}
__ mv(c_rarg1, t0);
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
// Make sure dst has the return value.
if (dst != x10) {
__ mv(dst, x10);
}
__ pop_call_clobbered_registers_except(RegSet::of(dst));
__ leave();
__ bind(done);
}
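// Editorial sketch (not part of the original change): the fast path emitted above, expressed
// in plain C++. 'bad_mask' stands for the per-thread address bad mask loaded above, and
// 'slow_path' for the XBarrierSetRuntime entry; both names are illustrative only.
static inline uintptr_t x_example_load_barrier(volatile uintptr_t* field, uintptr_t bad_mask,
                                               uintptr_t (*slow_path)(uintptr_t, volatile uintptr_t*)) {
  uintptr_t ref = *field;          // ld
  if ((ref & bad_mask) == 0) {     // andr + beqz: the color bits are good
    return ref;                    // common case, no runtime call
  }
  return slow_path(ref, field);    // runtime call, may self-heal *field
}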
#ifdef ASSERT
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2,
Register tmp3) {
// Verify value
if (is_reference_type(type)) {
// Note that val could be noreg, which means we
// are storing null and can skip verification.
if (val != noreg) {
Label done;
// tmp1, tmp2 and tmp3 are often set to noreg.
RegSet savedRegs = RegSet::of(t0);
__ push_reg(savedRegs, sp);
__ ld(t0, address_bad_mask_from_thread(xthread));
__ andr(t0, val, t0);
__ beqz(t0, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
__ pop_reg(savedRegs, sp);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
}
#endif // ASSERT
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs) {
if (!is_oop) {
// Barrier not needed
return;
}
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
assert_different_registers(src, count, t0);
__ push_reg(saved_regs, sp);
if (count == c_rarg0 && src == c_rarg1) {
// exactly backwards!!
__ xorr(c_rarg0, c_rarg0, c_rarg1);
__ xorr(c_rarg1, c_rarg0, c_rarg1);
__ xorr(c_rarg0, c_rarg0, c_rarg1);
} else {
__ mv(c_rarg0, src);
__ mv(c_rarg1, count);
}
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
__ pop_reg(saved_regs, sp);
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
}
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
assert_different_registers(jni_env, robj, tmp);
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
// Compute the offset of address bad mask from the field of jni_environment
long int bad_mask_relative_offset = (long int) (in_bytes(XThreadLocalData::address_bad_mask_offset()) -
in_bytes(JavaThread::jni_environment_offset()));
// Load the address bad mask
__ ld(tmp, Address(jni_env, bad_mask_relative_offset));
// Check address bad mask
__ andr(tmp, robj, tmp);
__ bnez(tmp, slowpath);
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
}
#ifdef COMPILER2
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_FloatRegister()) {
return opto_reg & ~1;
}
return opto_reg;
}
#undef __
#define __ _masm->
class XSaveLiveRegisters {
private:
MacroAssembler* const _masm;
RegSet _gp_regs;
FloatRegSet _fp_regs;
VectorRegSet _vp_regs;
public:
void initialize(XLoadBarrierStubC2* stub) {
// Record registers that need to be saved/restored
RegMaskIterator rmi(stub->live());
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
if (OptoReg::is_reg(opto_reg)) {
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
_gp_regs += RegSet::of(vm_reg->as_Register());
} else if (vm_reg->is_FloatRegister()) {
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
} else if (vm_reg->is_VectorRegister()) {
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1));
_vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
} else {
fatal("Unknown register type");
}
}
}
// Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
_gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref());
}
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_gp_regs(),
_fp_regs(),
_vp_regs() {
// Figure out what registers to save/restore
initialize(stub);
// Save registers
__ push_reg(_gp_regs, sp);
__ push_fp(_fp_regs, sp);
__ push_v(_vp_regs, sp);
}
~XSaveLiveRegisters() {
// Restore registers
__ pop_v(_vp_regs, sp);
__ pop_fp(_fp_regs, sp);
__ pop_reg(_gp_regs, sp);
}
};
class XSetupArguments {
private:
MacroAssembler* const _masm;
const Register _ref;
const Address _ref_addr;
public:
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {
// Setup arguments
if (_ref_addr.base() == noreg) {
// No self healing
if (_ref != c_rarg0) {
__ mv(c_rarg0, _ref);
}
__ mv(c_rarg1, zr);
} else {
// Self healing
if (_ref == c_rarg0) {
// _ref is already at correct place
__ la(c_rarg1, _ref_addr);
} else if (_ref != c_rarg1) {
// _ref is in wrong place, but not in c_rarg1, so fix it first
__ la(c_rarg1, _ref_addr);
__ mv(c_rarg0, _ref);
} else if (_ref_addr.base() != c_rarg0) {
assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
__ mv(c_rarg0, _ref);
__ la(c_rarg1, _ref_addr);
} else {
assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
if (_ref_addr.base() == c_rarg0) {
__ mv(t1, c_rarg1);
__ la(c_rarg1, _ref_addr);
__ mv(c_rarg0, t1);
} else {
ShouldNotReachHere();
}
}
}
}
~XSetupArguments() {
// Transfer result
if (_ref != x10) {
__ mv(_ref, x10);
}
}
};
#undef __
#define __ masm->
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
BLOCK_COMMENT("XLoadBarrierStubC2");
// Stub entry
__ bind(*stub->entry());
{
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);
__ mv(t1, stub->slow_path());
__ jalr(t1);
}
// Stub exit
__ j(*stub->continuation());
}
#endif // COMPILER2
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
assert_different_registers(xthread, ref->as_register(), t1);
__ ld(t1, address_bad_mask_from_thread(xthread));
__ andr(t1, t1, ref->as_register());
}
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
Register tmp = noreg;
if (stub->tmp()->is_valid()) {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = tmp = stub->tmp()->as_pointer_register();
} else {
// Address already in register
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save x10 unless it is the result or tmp register
// Set up SP to accommodate parameters and maybe x10.
if (ref != x10 && tmp != x10) {
__ sub(sp, sp, 32);
__ sd(x10, Address(sp, 16));
} else {
__ sub(sp, sp, 16);
}
// Setup arguments and call runtime stub
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ far_call(stub->runtime_stub());
// Verify result
__ verify_oop(x10);
// Move result into place
if (ref != x10) {
__ mv(ref, x10);
}
// Restore x10 unless it is the result or tmp register
if (ref != x10 && tmp != x10) {
__ ld(x10, Address(sp, 16));
__ add(sp, sp, 32);
} else {
__ add(sp, sp, 16);
}
// Stub exit
__ j(*stub->continuation());
}
#undef __
#define __ sasm->
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
__ prologue("zgc_load_barrier stub", false);
__ push_call_clobbered_registers_except(RegSet::of(x10));
// Setup arguments
__ load_parameter(0, c_rarg0);
__ load_parameter(1, c_rarg1);
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
__ pop_call_clobbered_registers_except(RegSet::of(x10));
__ epilogue();
}
#endif // COMPILER1
#undef __
#define __ masm->
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if mask is good.
// verifies that XAddressBadMask & obj == 0
__ ld(tmp2, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(tmp1, obj, tmp2);
__ bnez(tmp1, error);
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __

View File

@ -1,112 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2
#ifdef COMPILER1
class LIR_Assembler;
class LIR_Opr;
class StubAssembler;
#endif // COMPILER1
#ifdef COMPILER2
class Node;
#endif // COMPILER2
#ifdef COMPILER1
class XLoadBarrierStubC1;
#endif // COMPILER1
#ifdef COMPILER2
class XLoadBarrierStubC2;
#endif // COMPILER2
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
public:
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp2);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register val,
Register tmp1,
Register tmp2,
Register tmp3);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
bool is_oop,
Register src,
Register dst,
Register count,
RegSet saved_regs);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register robj,
Register tmp,
Label& slowpath);
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
XLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP

View File

@ -1,212 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef LINUX
#include <sys/mman.h>
#endif // LINUX
//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
// . .
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 6 5 2 1 0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// | | |
// | | * 41-0 Object Offset (42-bits, 4TB address space)
// | |
// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | 0010 = Marked1 (Address view 8-12TB)
// | 0100 = Remapped (Address view 16-20TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// . .
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | Marked0 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 7 6 3 2 0
// +------------------+-----+------------------------------------------------+
// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
// +-------------------+----+------------------------------------------------+
// | | |
// | | * 42-0 Object Offset (43-bits, 8TB address space)
// | |
// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
// | 0010 = Marked1 (Address view 16-24TB)
// | 0100 = Remapped (Address view 32-40TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
// . .
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | Marked0 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 8 7 4 3 0
// +------------------+----+-------------------------------------------------+
// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
// +------------------+----+-------------------------------------------------+
// | | |
// | | * 43-0 Object Offset (44-bits, 16TB address space)
// | |
// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
// | 0010 = Marked1 (Address view 32-48TB)
// | 0100 = Remapped (Address view 64-80TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-48 Fixed (16-bits, always zero)
//
// Default value if probing is not implemented for a certain platform: 128TB
static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
// Minimum value returned, if probing fails: 64GB
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
static size_t probe_valid_max_address_bit() {
#ifdef LINUX
size_t max_address_bit = 0;
const size_t page_size = os::vm_page_size();
for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
const uintptr_t base_addr = ((uintptr_t) 1U) << i;
if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
// msync succeeded, the address is valid, and maybe even already mapped.
max_address_bit = i;
break;
}
if (errno != ENOMEM) {
// Some error occurred. This should never happen, but msync
// has some undefined behavior, hence ignore this bit.
#ifdef ASSERT
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#else // ASSERT
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#endif // ASSERT
continue;
}
// Since msync failed with ENOMEM, the page might not be mapped.
// Try to map it, to see if the address is valid.
void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (result_addr != MAP_FAILED) {
munmap(result_addr, page_size);
}
if ((uintptr_t) result_addr == base_addr) {
// address is valid
max_address_bit = i;
break;
}
}
if (max_address_bit == 0) {
// probing failed, allocate a very high page and take that bit as the maximum
const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (result_addr != MAP_FAILED) {
max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
munmap(result_addr, page_size);
}
}
log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
#else // LINUX
return DEFAULT_MAX_ADDRESS_BIT;
#endif // LINUX
}
size_t XPlatformAddressOffsetBits() {
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
const size_t min_address_offset_bits = max_address_offset_bits - 2;
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
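// Editorial sketch (not part of the original change): how the clamped result above selects one
// of the three layouts documented at the top of this file, assuming the probe hit bit 46 (a
// 47-bit user address space) and a virtual-to-physical ratio of 16; 'x_example_layout_for_heap'
// is illustrative only.
static const char* x_example_layout_for_heap(size_t max_heap_bytes) {
  const size_t valid_max_address_offset_bits = 47;                          // probe hit bit 46
  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; // 44
  const size_t min_address_offset_bits = max_address_offset_bits - 2;       // 42
  const size_t address_offset = round_up_power_of_2(max_heap_bytes * 16);
  const size_t address_offset_bits = log2i_exact(address_offset);
  const size_t bits = clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
  switch (bits) {
    case 42: return "Layout 1 (42-bit offset, 4TB)";
    case 43: return "Layout 2 (43-bit offset, 8TB)";
    default: return "Layout 3 (44-bit offset, 16TB)";
  }
}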
size_t XPlatformAddressMetadataShift() {
return XPlatformAddressOffsetBits();
}

View File

@ -1,35 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
#define CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
const size_t XPlatformHeapViews = 3;
const size_t XPlatformCacheLineSize = 64;
size_t XPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift();
#endif // CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP

View File

@ -1,229 +0,0 @@
//
// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{
#include "gc/shared/gc_globals.hpp"
#include "gc/x/c2/xBarrierSetC2.hpp"
#include "gc/x/xThreadLocalData.hpp"
%}
source %{
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
__ ld(tmp, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(tmp, tmp, ref);
__ bnez(tmp, *stub->entry(), true /* far */);
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ j(*stub->entry());
__ bind(*stub->continuation());
}
%}
// Load Pointer
instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0));
effect(TEMP dst, TEMP tmp, KILL cr);
ins_cost(4 * DEFAULT_COST);
format %{ "ld $dst, $mem, #@zLoadP" %}
ins_encode %{
const Address ref_addr (as_Register($mem$$base), $mem$$disp);
__ ld($dst$$Register, ref_addr);
x_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
%}
instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(TEMP_DEF res, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t"
"mv $res, $res == $oldval" %}
ins_encode %{
Label failed;
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $tmp$$Register);
__ sub(t0, $tmp$$Register, $oldval$$Register);
__ seqz($res$$Register, t0);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
effect(TEMP_DEF res, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t"
"mv $res, $res == $oldval" %}
ins_encode %{
Label failed;
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $tmp$$Register);
__ sub(t0, $tmp$$Register, $oldval$$Register);
__ seqz($res$$Register, t0);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(TEMP_DEF res, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(TEMP_DEF res, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %}
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
if (barrier_data() != XLoadBarrierElided) {
Label good;
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
}
%}
ins_pipe(pipe_slow);
%}
instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
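
The removed instructs above all guard the reference the same way: after the access, AND it against the per-thread address bad mask and branch to the generated stub only when a bad bit is set. A conceptual C++ rendering of that check, not HotSpot code; slow_path stands in for the XLoadBarrierStubC2 runtime call:

#include <cstdint>

using oop_t = uintptr_t;

oop_t load_ref(const oop_t* addr, uintptr_t address_bad_mask,
               oop_t (*slow_path)(const oop_t*, oop_t)) {
  oop_t ref = *addr;                    // the plain ld in the patterns above
  if ((ref & address_bad_mask) != 0) {  // andr + bnez to the stub entry
    ref = slow_path(addr, ref);         // heal the reference on the slow path
  }
  return ref;                           // guaranteed good on the continuation path
}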


@ -94,7 +94,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address
instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
%{
match(Set dst (LoadP mem));
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, TEMP tmp, KILL cr);
ins_cost(4 * DEFAULT_COST);
@ -113,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
// Store Pointer
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp1, TEMP tmp2, KILL cr);
@ -131,7 +131,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@ -154,7 +154,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@ -176,7 +176,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@ -198,7 +198,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@ -219,7 +219,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
@ -237,7 +237,7 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rF
instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set prev (GetAndSetP mem newv));
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP_DEF prev, TEMP tmp, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);


@ -946,7 +946,7 @@ class StubGenerator: public StubCodeGenerator {
// The size of copy32_loop body increases significantly with ZGC GC barriers.
// Need conditional far branches to reach a point beyond the loop in this case.
bool is_far = UseZGC && ZGenerational;
bool is_far = UseZGC;
__ beqz(count, done, is_far);
__ slli(cnt, count, exact_log2(granularity));


@ -1333,10 +1333,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
}
#endif
if (!(UseZGC && !ZGenerational)) {
// Load barrier has not yet been applied, so ZGC can't verify the oop here
__ verify_oop(dest->as_register());
}
__ verify_oop(dest->as_register());
}
}


@ -1,734 +0,0 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetRuntime.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/x/c1/xBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/x/c2/xBarrierSetC2.hpp"
#endif // COMPILER2
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#undef __
#define __ masm->
static void call_vm(MacroAssembler* masm,
address entry_point,
Register arg0,
Register arg1) {
// Setup arguments
if (arg1 == c_rarg0) {
if (arg0 == c_rarg1) {
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, arg1);
__ movptr(c_rarg0, arg0);
}
} else {
if (arg0 != c_rarg0) {
__ movptr(c_rarg0, arg0);
}
if (arg1 != c_rarg1) {
__ movptr(c_rarg1, arg1);
}
}
// Call VM
__ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void XBarrierSetAssembler::load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread) {
if (!XBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
BLOCK_COMMENT("XBarrierSetAssembler::load_at {");
// Allocate scratch register
Register scratch = tmp1;
if (tmp1 == noreg) {
scratch = r12;
__ push(scratch);
}
assert_different_registers(dst, scratch);
Label done;
//
// Fast Path
//
// Load address
__ lea(scratch, src);
// Load oop at address
__ movptr(dst, Address(scratch, 0));
// Test address bad mask
__ testptr(dst, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
//
// Slow path
//
// Save registers
__ push(rax);
__ push(rcx);
__ push(rdx);
__ push(rdi);
__ push(rsi);
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
// We may end up here from generate_native_wrapper, then the method may have
// floats as arguments, and we must spill them before calling the VM runtime
// leaf. From the interpreter all floats are passed on the stack.
assert(Argument::n_float_register_parameters_j == 8, "Assumption");
const int xmm_size = wordSize * 2;
const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
__ subptr(rsp, xmm_spill_size);
__ movdqu(Address(rsp, xmm_size * 7), xmm7);
__ movdqu(Address(rsp, xmm_size * 6), xmm6);
__ movdqu(Address(rsp, xmm_size * 5), xmm5);
__ movdqu(Address(rsp, xmm_size * 4), xmm4);
__ movdqu(Address(rsp, xmm_size * 3), xmm3);
__ movdqu(Address(rsp, xmm_size * 2), xmm2);
__ movdqu(Address(rsp, xmm_size * 1), xmm1);
__ movdqu(Address(rsp, xmm_size * 0), xmm0);
// Call VM
call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
__ movdqu(xmm0, Address(rsp, xmm_size * 0));
__ movdqu(xmm1, Address(rsp, xmm_size * 1));
__ movdqu(xmm2, Address(rsp, xmm_size * 2));
__ movdqu(xmm3, Address(rsp, xmm_size * 3));
__ movdqu(xmm4, Address(rsp, xmm_size * 4));
__ movdqu(xmm5, Address(rsp, xmm_size * 5));
__ movdqu(xmm6, Address(rsp, xmm_size * 6));
__ movdqu(xmm7, Address(rsp, xmm_size * 7));
__ addptr(rsp, xmm_spill_size);
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
__ pop(rsi);
__ pop(rdi);
__ pop(rdx);
__ pop(rcx);
if (dst == rax) {
__ addptr(rsp, wordSize);
} else {
__ movptr(dst, rax);
__ pop(rax);
}
__ bind(done);
// Restore scratch register
if (tmp1 == noreg) {
__ pop(scratch);
}
BLOCK_COMMENT("} XBarrierSetAssembler::load_at");
}
#ifdef ASSERT
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2,
Register tmp3) {
BLOCK_COMMENT("XBarrierSetAssembler::store_at {");
// Verify oop store
if (is_reference_type(type)) {
// Note that src could be noreg, which means we
// are storing null and can skip verification.
if (src != noreg) {
Label done;
__ testptr(src, address_bad_mask_from_thread(r15_thread));
__ jcc(Assembler::zero, done);
__ stop("Verify oop store failed");
__ should_not_reach_here();
__ bind(done);
}
}
// Store value
BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
BLOCK_COMMENT("} XBarrierSetAssembler::store_at");
}
#endif // ASSERT
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count) {
if (!XBarrierSet::barrier_needed(decorators, type)) {
// Barrier not needed
return;
}
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
// Save registers
__ pusha();
// Call VM
call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
// Restore registers
__ popa();
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
}
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath) {
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
// Resolve jobject
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
// Test address bad mask
__ testptr(obj, address_bad_mask_from_jni_env(jni_env));
__ jcc(Assembler::notZero, slowpath);
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const {
__ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const {
// Stub entry
__ bind(*stub->entry());
Register ref = stub->ref()->as_register();
Register ref_addr = noreg;
Register tmp = noreg;
if (stub->tmp()->is_valid()) {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = tmp = stub->tmp()->as_pointer_register();
} else {
// Address already in register
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
// Save rax unless it is the result or tmp register
if (ref != rax && tmp != rax) {
__ push(rax);
}
// Setup arguments and call runtime stub
__ subptr(rsp, 2 * BytesPerWord);
ce->store_parameter(ref_addr, 1);
ce->store_parameter(ref, 0);
__ call(RuntimeAddress(stub->runtime_stub()));
__ addptr(rsp, 2 * BytesPerWord);
// Verify result
__ verify_oop(rax);
// Move result into place
if (ref != rax) {
__ movptr(ref, rax);
}
// Restore rax unless it is the result or tmp register
if (ref != rax && tmp != rax) {
__ pop(rax);
}
// Stub exit
__ jmp(*stub->continuation());
}
#undef __
#define __ sasm->
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const {
// Enter and save registers
__ enter();
__ save_live_registers_no_oop_map(true /* save_fpu_registers */);
// Setup arguments
__ load_parameter(1, c_rarg1);
__ load_parameter(0, c_rarg0);
// Call VM
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
// Restore registers and return
__ restore_live_registers_except_rax(true /* restore_fpu_registers */);
__ leave();
__ ret(0);
}
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_XMMRegister()) {
opto_reg &= ~15;
switch (node->ideal_reg()) {
case Op_VecX:
opto_reg |= 2;
break;
case Op_VecY:
opto_reg |= 4;
break;
case Op_VecZ:
opto_reg |= 8;
break;
default:
opto_reg |= 1;
break;
}
}
return opto_reg;
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
#define __ _masm->
class XSaveLiveRegisters {
private:
struct XMMRegisterData {
XMMRegister _reg;
int _size;
// Used by GrowableArray::find()
bool operator == (const XMMRegisterData& other) {
return _reg == other._reg;
}
};
MacroAssembler* const _masm;
GrowableArray<Register> _gp_registers;
GrowableArray<KRegister> _opmask_registers;
GrowableArray<XMMRegisterData> _xmm_registers;
int _spill_size;
int _spill_offset;
static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
if (left->_size == right->_size) {
return 0;
}
return (left->_size < right->_size) ? -1 : 1;
}
static int xmm_slot_size(OptoReg::Name opto_reg) {
// The low order 4 bytes denote what size of the XMM register is live
return (opto_reg & 15) << 3;
}
static uint xmm_ideal_reg_for_size(int reg_size) {
switch (reg_size) {
case 8:
return Op_VecD;
case 16:
return Op_VecX;
case 32:
return Op_VecY;
case 64:
return Op_VecZ;
default:
fatal("Invalid register size %d", reg_size);
return 0;
}
}
bool xmm_needs_vzeroupper() const {
return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
}
void xmm_register_save(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}
void gp_register_save(Register reg) {
_spill_offset -= 8;
__ movq(Address(rsp, _spill_offset), reg);
}
void opmask_register_save(KRegister reg) {
_spill_offset -= 8;
__ kmov(Address(rsp, _spill_offset), reg);
}
void gp_register_restore(Register reg) {
__ movq(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void opmask_register_restore(KRegister reg) {
__ kmov(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void initialize(XLoadBarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
RegMask caller_saved;
caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
if (UseAPX) {
caller_saved.Insert(OptoReg::as_OptoReg(r16->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r17->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r18->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r19->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r20->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r21->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r22->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r23->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r24->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r25->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r26->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r27->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r28->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r29->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r30->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r31->as_VMReg()));
}
// Create mask of live registers
RegMask live = stub->live();
if (stub->tmp() != noreg) {
live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
}
int gp_spill_size = 0;
int opmask_spill_size = 0;
int xmm_spill_size = 0;
// Record registers that needs to be saved/restored
RegMaskIterator rmi(live);
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
if (caller_saved.Member(opto_reg)) {
_gp_registers.append(vm_reg->as_Register());
gp_spill_size += 8;
}
} else if (vm_reg->is_KRegister()) {
// All opmask registers are caller saved, thus spill the ones
// which are live.
if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
_opmask_registers.append(vm_reg->as_KRegister());
opmask_spill_size += 8;
}
} else if (vm_reg->is_XMMRegister()) {
// We encode in the low order 4 bits of the opto_reg, how large part of the register is live
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
const int reg_size = xmm_slot_size(opto_reg);
const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
const int reg_index = _xmm_registers.find(reg_data);
if (reg_index == -1) {
// Not previously appended
_xmm_registers.append(reg_data);
xmm_spill_size += reg_size;
} else {
// Previously appended, update size
const int reg_size_prev = _xmm_registers.at(reg_index)._size;
if (reg_size > reg_size_prev) {
_xmm_registers.at_put(reg_index, reg_data);
xmm_spill_size += reg_size - reg_size_prev;
}
}
} else {
fatal("Unexpected register type");
}
}
// Sort by size, largest first
_xmm_registers.sort(xmm_compare_register_size);
// On Windows, the caller reserves stack space for spilling register arguments
const int arg_spill_size = frame::arg_reg_save_area_bytes;
// Stack pointer must be 16 bytes aligned for the call
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
}
public:
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_gp_registers(),
_opmask_registers(),
_xmm_registers(),
_spill_size(0),
_spill_offset(0) {
//
// Stack layout after registers have been spilled:
//
// | ...              | original rsp, 16 bytes aligned
// ------------------
// | zmm0 high        |
// | ...              |
// | zmm0 low         | 16 bytes aligned
// | ...              |
// | ymm1 high        |
// | ...              |
// | ymm1 low         | 16 bytes aligned
// | ...              |
// | xmmN high        |
// | ...              |
// | xmmN low         | 8 bytes aligned
// | reg0             | 8 bytes aligned
// | reg1             |
// | ...              |
// | regN             | new rsp, if 16 bytes aligned
// | <padding>        | else new rsp, 16 bytes aligned
// ------------------
//
// Figure out what registers to save/restore
initialize(stub);
// Allocate stack space
if (_spill_size > 0) {
__ subptr(rsp, _spill_size);
}
// Save XMM/YMM/ZMM registers
for (int i = 0; i < _xmm_registers.length(); i++) {
xmm_register_save(_xmm_registers.at(i));
}
if (xmm_needs_vzeroupper()) {
__ vzeroupper();
}
// Save general purpose registers
for (int i = 0; i < _gp_registers.length(); i++) {
gp_register_save(_gp_registers.at(i));
}
// Save opmask registers
for (int i = 0; i < _opmask_registers.length(); i++) {
opmask_register_save(_opmask_registers.at(i));
}
}
~XSaveLiveRegisters() {
// Restore opmask registers
for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
opmask_register_restore(_opmask_registers.at(i));
}
// Restore general purpose registers
for (int i = _gp_registers.length() - 1; i >= 0; i--) {
gp_register_restore(_gp_registers.at(i));
}
__ vzeroupper();
// Restore XMM/YMM/ZMM registers
for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
xmm_register_restore(_xmm_registers.at(i));
}
// Free stack space
if (_spill_size > 0) {
__ addptr(rsp, _spill_size);
}
}
};
class XSetupArguments {
private:
MacroAssembler* const _masm;
const Register _ref;
const Address _ref_addr;
public:
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
_masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {
// Setup arguments
if (_ref_addr.base() == noreg) {
// No self healing
if (_ref != c_rarg0) {
__ movq(c_rarg0, _ref);
}
__ xorq(c_rarg1, c_rarg1);
} else {
// Self healing
if (_ref == c_rarg0) {
__ lea(c_rarg1, _ref_addr);
} else if (_ref != c_rarg1) {
__ lea(c_rarg1, _ref_addr);
__ movq(c_rarg0, _ref);
} else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
__ movq(c_rarg0, _ref);
__ lea(c_rarg1, _ref_addr);
} else {
__ xchgq(c_rarg0, c_rarg1);
if (_ref_addr.base() == c_rarg0) {
__ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
} else if (_ref_addr.index() == c_rarg0) {
__ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
} else {
ShouldNotReachHere();
}
}
}
}
~XSetupArguments() {
// Transfer result
if (_ref != rax) {
__ movq(_ref, rax);
}
}
};
#undef __
#define __ masm->
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
BLOCK_COMMENT("XLoadBarrierStubC2");
// Stub entry
__ bind(*stub->entry());
{
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);
__ call(RuntimeAddress(stub->slow_path()));
}
// Stub exit
__ jmp(*stub->continuation());
}
#endif // COMPILER2
#undef __
#define __ masm->
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if metadata bits indicate a bad oop
__ testptr(obj, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, error);
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __
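
The XMM bookkeeping above packs the live width of a vector register into the low four bits of its opto_reg: refine_register() ORs in 1, 2, 4 or 8 depending on the ideal register type, and xmm_slot_size() converts that back to bytes by shifting left by three. A tiny standalone sketch of that round trip (plain arithmetic, no HotSpot types):

#include <cstdio>

static int xmm_slot_size(int opto_reg_low_bits) {
  return (opto_reg_low_bits & 15) << 3;   // 8-byte slots
}

int main() {
  printf("scalar/VecD -> %d bytes\n", xmm_slot_size(1));  // 8
  printf("VecX        -> %d bytes\n", xmm_slot_size(2));  // 16
  printf("VecY        -> %d bytes\n", xmm_slot_size(4));  // 32
  printf("VecZ        -> %d bytes\n", xmm_slot_size(8));  // 64
  return 0;
}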


@ -1,109 +0,0 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2
class MacroAssembler;
#ifdef COMPILER1
class LIR_Assembler;
class LIR_Opr;
class StubAssembler;
#endif // COMPILER1
#ifdef COMPILER2
class Node;
#endif // COMPILER2
#ifdef COMPILER1
class XLoadBarrierStubC1;
#endif // COMPILER1
#ifdef COMPILER2
class XLoadBarrierStubC2;
#endif // COMPILER2
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
public:
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register dst,
Address src,
Register tmp1,
Register tmp_thread);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Address dst,
Register src,
Register tmp1,
Register tmp2,
Register tmp3);
#endif // ASSERT
virtual void arraycopy_prologue(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
Register src,
Register dst,
Register count);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
Register jni_env,
Register obj,
Register tmp,
Label& slowpath);
#ifdef COMPILER1
void generate_c1_load_barrier_test(LIR_Assembler* ce,
LIR_Opr ref) const;
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
XLoadBarrierStubC1* stub) const;
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
DecoratorSet decorators) const;
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
XLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP


@ -1,149 +0,0 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// .                                .
// .                                .
// .                                .
// +--------------------------------+ 0x0000014000000000 (20TB)
// |          Remapped View         |
// +--------------------------------+ 0x0000010000000000 (16TB)
// .                                .
// +--------------------------------+ 0x00000c0000000000 (12TB)
// |          Marked1 View          |
// +--------------------------------+ 0x0000080000000000 (8TB)
// |          Marked0 View          |
// +--------------------------------+ 0x0000040000000000 (4TB)
// .                                .
// +--------------------------------+ 0x0000000000000000
//
//  6                  4 4  4 4
//  3                  6 5  2 1                                             0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// |                    |    |
// |                    |    * 41-0 Object Offset (42-bits, 4TB address space)
// |                    |
// |                    * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
// |                                                    0010 = Marked1      (Address view 8-12TB)
// |                                                    0100 = Remapped     (Address view 16-20TB)
// |                                                    1000 = Finalizable  (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// .                                .
// .                                .
// .                                .
// +--------------------------------+ 0x0000280000000000 (40TB)
// |          Remapped View         |
// +--------------------------------+ 0x0000200000000000 (32TB)
// .                                .
// +--------------------------------+ 0x0000180000000000 (24TB)
// |          Marked1 View          |
// +--------------------------------+ 0x0000100000000000 (16TB)
// |          Marked0 View          |
// +--------------------------------+ 0x0000080000000000 (8TB)
// .                                .
// +--------------------------------+ 0x0000000000000000
//
//  6                 4 4  4 4
//  3                 7 6  3 2                                              0
// +-------------------+----+------------------------------------------------+
// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
// +-------------------+----+------------------------------------------------+
// |                   |    |
// |                   |    * 42-0 Object Offset (43-bits, 8TB address space)
// |                   |
// |                   * 46-43 Metadata Bits (4-bits)  0001 = Marked0      (Address view 8-16TB)
// |                                                   0010 = Marked1      (Address view 16-24TB)
// |                                                   0100 = Remapped     (Address view 32-40TB)
// |                                                   1000 = Finalizable  (Address view N/A)
// |
// * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// .                                .
// .                                .
// .                                .
// +--------------------------------+ 0x0000500000000000 (80TB)
// |          Remapped View         |
// +--------------------------------+ 0x0000400000000000 (64TB)
// .                                .
// +--------------------------------+ 0x0000300000000000 (48TB)
// |          Marked1 View          |
// +--------------------------------+ 0x0000200000000000 (32TB)
// |          Marked0 View          |
// +--------------------------------+ 0x0000100000000000 (16TB)
// .                                .
// +--------------------------------+ 0x0000000000000000
//
//  6               4  4  4 4
//  3               8  7  4 3                                               0
// +------------------+----+-------------------------------------------------+
// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
// +------------------+----+-------------------------------------------------+
// |                  |    |
// |                  |    * 43-0 Object Offset (44-bits, 16TB address space)
// |                  |
// |                  * 47-44 Metadata Bits (4-bits)  0001 = Marked0      (Address view 16-32TB)
// |                                                  0010 = Marked1      (Address view 32-48TB)
// |                                                  0100 = Remapped     (Address view 64-80TB)
// |                                                  1000 = Finalizable  (Address view N/A)
// |
// * 63-48 Fixed (16-bits, always zero)
//
size_t XPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t XPlatformAddressMetadataShift() {
return XPlatformAddressOffsetBits();
}
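
The three layouts above differ only in how many object-offset bits are used (42, 43 or 44), which is exactly what XPlatformAddressOffsetBits() clamps between. A small sketch of how the metadata bits of Layout 1 turn into the view addresses drawn in the first diagram, assuming 42 offset bits:

#include <cstdint>
#include <cstdio>

int main() {
  const int offset_bits = 42;                      // Layout 1 (4TB of object offsets)
  const uint64_t TB = 1ull << 40;
  const uint64_t view_size = 1ull << offset_bits;
  const uint64_t marked0  = 1ull << offset_bits;         // metadata bit 42
  const uint64_t marked1  = 1ull << (offset_bits + 1);   // metadata bit 43
  const uint64_t remapped = 1ull << (offset_bits + 2);   // metadata bit 44
  printf("Marked0  view: %llu-%llu TB\n", (unsigned long long)(marked0 / TB),
         (unsigned long long)((marked0 + view_size) / TB));    // 4-8
  printf("Marked1  view: %llu-%llu TB\n", (unsigned long long)(marked1 / TB),
         (unsigned long long)((marked1 + view_size) / TB));    // 8-12
  printf("Remapped view: %llu-%llu TB\n", (unsigned long long)(remapped / TB),
         (unsigned long long)((remapped + view_size) / TB));   // 16-20
  return 0;
}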


@ -1,33 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_GC_X_XGLOBALS_X86_HPP
#define CPU_X86_GC_X_XGLOBALS_X86_HPP
const size_t XPlatformHeapViews = 3;
const size_t XPlatformCacheLineSize = 64;
size_t XPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift();
#endif // CPU_X86_GC_X_XGLOBALS_X86_HPP


@ -1,156 +0,0 @@
//
// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{
#include "gc/shared/gc_globals.hpp"
#include "gc/x/c2/xBarrierSetC2.hpp"
#include "gc/x/xThreadLocalData.hpp"
%}
source %{
#include "c2_intelJccErratum_x86.hpp"
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
{
IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, *stub->entry());
}
__ bind(*stub->continuation());
}
static void x_load_barrier_cmpxchg(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
{
IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::zero, good);
}
{
IntelJccErratumAlignment intel_alignment(masm, 5 /* jcc_size */);
__ jmp(*stub->entry());
}
__ bind(*stub->continuation());
}
static void x_cmpxchg_common(MacroAssembler* masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
// Compare value (oldval) is in rax
const Address mem = Address(mem_reg, 0);
if (node->barrier_data() != XLoadBarrierElided) {
__ movptr(tmp, rax);
}
__ lock();
__ cmpxchgptr(newval, mem);
if (node->barrier_data() != XLoadBarrierElided) {
Label good;
x_load_barrier_cmpxchg(masm, node, mem, rax, tmp, good);
__ movptr(rax, tmp);
__ lock();
__ cmpxchgptr(newval, mem);
__ bind(good);
}
}
%}
// Load Pointer
instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr)
%{
predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
match(Set dst (LoadP mem));
effect(KILL cr, TEMP dst);
ins_cost(125);
format %{ "movq $dst, $mem" %}
ins_encode %{
__ movptr($dst$$Register, $mem$$Address);
x_load_barrier(masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(ialu_reg_mem);
%}
instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(KILL cr, TEMP tmp);
format %{ "lock\n\t"
"cmpxchgq $newval, $mem" %}
ins_encode %{
precond($oldval$$Register == rax);
x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
%}
ins_pipe(pipe_cmpxchg);
%}
instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
effect(KILL cr, KILL oldval, TEMP tmp);
format %{ "lock\n\t"
"cmpxchgq $newval, $mem\n\t"
"setcc $res \t# emits sete + movzbl or setzue for APX" %}
ins_encode %{
precond($oldval$$Register == rax);
x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
if (barrier_data() != XLoadBarrierElided) {
__ cmpptr($tmp$$Register, rax);
}
__ setcc(Assembler::equal, $res$$Register);
%}
ins_pipe(pipe_cmpxchg);
%}
instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
match(Set newval (GetAndSetP mem newval));
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
effect(KILL cr);
format %{ "xchgq $newval, $mem" %}
ins_encode %{
__ xchgptr($newval$$Register, Address($mem$$Register, 0));
x_load_barrier(masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(pipe_cmpxchg);
%}
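
x_cmpxchg_common() above retries the CAS at most once: a failure whose witnessed value has bad-mask bits set only means the field still held a stale colored pointer, so the slow path heals it and the CAS is repeated with the original compare value. A conceptual sketch of that flow, using std::atomic in place of the emitted lock cmpxchg; heal() stands in for the XLoadBarrierStubC2 slow path and is an assumption of this sketch:

#include <atomic>
#include <cstdint>

using oop_t = uintptr_t;

bool cas_oop(std::atomic<oop_t>& field, oop_t expected, oop_t newval,
             uintptr_t bad_mask, void (*heal)(std::atomic<oop_t>*, oop_t)) {
  oop_t witness = expected;
  if (field.compare_exchange_strong(witness, newval)) {
    return true;                            // first attempt succeeded
  }
  if ((witness & bad_mask) != 0) {          // failure caused by a stale color
    heal(&field, witness);                  // slow path heals the field in place
    oop_t again = expected;
    return field.compare_exchange_strong(again, newval);  // single retry
  }
  return false;                             // genuine value mismatch
}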


@ -115,7 +115,7 @@ operand no_rax_RegP()
// Load Pointer
instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
%{
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
match(Set dst (LoadP mem));
effect(TEMP dst, KILL cr);
@ -134,7 +134,7 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
// Load Pointer and Null Check
instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
%{
predicate(UseZGC && ZGenerational && n->in(1)->as_Load()->barrier_data() != 0);
predicate(UseZGC && n->in(1)->as_Load()->barrier_data() != 0);
match(Set cr (CmpP (LoadP op) zero));
ins_cost(500); // XXX
@ -150,7 +150,7 @@ instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
// Store Pointer
instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
%{
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem src));
effect(TEMP tmp, KILL cr);
@ -166,7 +166,7 @@ instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
// Store Null Pointer
instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
%{
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
predicate(UseZGC && n->as_Store()->barrier_data() != 0);
match(Set mem (StoreP mem zero));
effect(TEMP tmp, KILL cr);
@ -185,7 +185,7 @@ instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL cr);
format %{ "lock\n\t"
@ -208,7 +208,7 @@ instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_R
instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL oldval, KILL cr);
format %{ "lock\n\t"
@ -230,7 +230,7 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_
instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
match(Set newval (GetAndSetP mem newval));
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
effect(TEMP tmp, KILL cr);
format %{ "xchgq $newval, $mem" %}


@ -1,34 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xLargePages.hpp"
#include "runtime/globals.hpp"
void XLargePages::pd_initialize() {
if (UseLargePages) {
_state = Explicit;
} else {
_state = Disabled;
}
}


@ -1,43 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xNUMA.hpp"
#include "utilities/globalDefinitions.hpp"
void XNUMA::pd_initialize() {
_enabled = false;
}
uint32_t XNUMA::count() {
return 1;
}
uint32_t XNUMA::id() {
return 0;
}
uint32_t XNUMA::memory_id(uintptr_t addr) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}


@ -1,181 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/x/xErrno.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xLargePages.inline.hpp"
#include "gc/x/xPhysicalMemory.inline.hpp"
#include "gc/x/xPhysicalMemoryBacking_bsd.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/mman.h>
#include <sys/types.h>
// The backing is represented by a reserved virtual address space, in which
// we commit and uncommit physical memory. Multi-mapping the different heap
// views is done by simply remapping the backing memory using mach_vm_remap().
static int vm_flags_superpage() {
if (!XLargePages::is_explicit()) {
return 0;
}
const int page_size_in_megabytes = XGranuleSize >> 20;
return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
}
static XErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
mach_vm_address_t remap_addr = to_addr;
vm_prot_t remap_cur_prot;
vm_prot_t remap_max_prot;
// Remap memory to an additional location
const kern_return_t res = mach_vm_remap(mach_task_self(),
&remap_addr,
size,
0 /* mask */,
VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
mach_task_self(),
from_addr,
FALSE /* copy */,
&remap_cur_prot,
&remap_max_prot,
VM_INHERIT_COPY);
return (res == KERN_SUCCESS) ? XErrno(0) : XErrno(EINVAL);
}
XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
_base(0),
_initialized(false) {
// Reserve address space for backing memory
_base = (uintptr_t)os::reserve_memory(max_capacity);
if (_base == 0) {
// Failed
log_error_pd(gc)("Failed to reserve address space for backing memory");
return;
}
// Successfully initialized
_initialized = true;
}
bool XPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}
void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}
bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
const uintptr_t addr = _base + offset;
const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (res == MAP_FAILED) {
XErrno err;
log_error(gc)("Failed to commit memory (%s)", err.to_string());
return false;
}
// Success
return true;
}
size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
return length;
}
// Failed, try to commit as much as possible
size_t start = offset;
size_t end = offset + length;
for (;;) {
length = align_down((end - start) / 2, XGranuleSize);
if (length == 0) {
// Done, don't commit more
return start - offset;
}
if (commit_inner(start, length)) {
// Success, try commit more
start += length;
} else {
// Failed, try commit less
end -= length;
}
}
}
size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
const uintptr_t start = _base + offset;
const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
XErrno err;
log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
return 0;
}
return length;
}
void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
const XErrno err = mremap(_base + offset, addr, size);
if (err) {
fatal("Failed to remap memory (%s)", err.to_string());
}
}
void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
XErrno err;
fatal("Failed to map memory (%s)", err.to_string());
}
}

View File

@@ -1,48 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
#define OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
class XPhysicalMemoryBacking {
private:
uintptr_t _base;
bool _initialized;
bool commit_inner(size_t offset, size_t length) const;
public:
XPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
void warn_commit_limits(size_t max_capacity) const;
size_t commit(size_t offset, size_t length) const;
size_t uncommit(size_t offset, size_t length) const;
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
};
#endif // OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP

View File

@@ -1,38 +0,0 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xLargePages.hpp"
#include "runtime/globals.hpp"
void XLargePages::pd_initialize() {
if (UseLargePages) {
if (UseTransparentHugePages) {
_state = Transparent;
} else {
_state = Explicit;
}
} else {
_state = Disabled;
}
}

View File

@@ -1,154 +0,0 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/x/xArray.inline.hpp"
#include "gc/x/xErrno.hpp"
#include "gc/x/xMountPoint_linux.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include <stdio.h>
#include <unistd.h>
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
XMountPoint::XMountPoint(const char* filesystem, const char** preferred_mountpoints) {
if (AllocateHeapAt != nullptr) {
// Use specified path
_path = os::strdup(AllocateHeapAt, mtGC);
} else {
// Find suitable path
_path = find_mountpoint(filesystem, preferred_mountpoints);
}
}
XMountPoint::~XMountPoint() {
os::free(_path);
_path = nullptr;
}
char* XMountPoint::get_mountpoint(const char* line, const char* filesystem) const {
char* line_mountpoint = nullptr;
char* line_filesystem = nullptr;
// Parse line and return a newly allocated string containing the mount point if
// the line contains a matching filesystem and the mount point is accessible by
// the current user.
// sscanf, using %m, will return malloced memory. Need raw ::free, not os::free.
if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
strcmp(line_filesystem, filesystem) != 0 ||
access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
// Not a matching or accessible filesystem
ALLOW_C_FUNCTION(::free, ::free(line_mountpoint);)
line_mountpoint = nullptr;
}
ALLOW_C_FUNCTION(::free, ::free(line_filesystem);)
return line_mountpoint;
}
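To make the format string above easier to read, here is a minimal standalone sketch using a hypothetical /proc/self/mountinfo line (layout per proc(5)); the %m modifier is the POSIX/glibc allocating variant, which is why the results must be released with raw free():

#include <stdio.h>
#include <stdlib.h>

int main() {
  // Hypothetical mountinfo line: mount ID, parent ID, major:minor, root,
  // mount point, mount options, optional fields, "-" separator,
  // filesystem type, mount source, super options (see proc(5)).
  const char* line = "36 25 0:31 / /dev/shm rw,nosuid,nodev shared:6 - tmpfs tmpfs rw";
  char* mountpoint = nullptr;  // captured by the first %ms (field 5)
  char* filesystem = nullptr;  // captured by the second %ms (first field after "-")
  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &mountpoint, &filesystem) == 2) {
    printf("mountpoint=%s filesystem=%s\n", mountpoint, filesystem);  // /dev/shm tmpfs
  }
  free(mountpoint);  // %m allocates with malloc, so raw free() is required
  free(filesystem);
  return 0;
}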
void XMountPoint::get_mountpoints(const char* filesystem, XArray<char*>* mountpoints) const {
FILE* fd = os::fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == nullptr) {
XErrno err;
log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
return;
}
char* line = nullptr;
size_t length = 0;
while (getline(&line, &length, fd) != -1) {
char* const mountpoint = get_mountpoint(line, filesystem);
if (mountpoint != nullptr) {
mountpoints->append(mountpoint);
}
}
// getline will return malloced memory. Need raw ::free, not os::free.
ALLOW_C_FUNCTION(::free, ::free(line);)
fclose(fd);
}
void XMountPoint::free_mountpoints(XArray<char*>* mountpoints) const {
XArrayIterator<char*> iter(mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
ALLOW_C_FUNCTION(::free, ::free(mountpoint);) // *not* os::free
}
mountpoints->clear();
}
char* XMountPoint::find_preferred_mountpoint(const char* filesystem,
XArray<char*>* mountpoints,
const char** preferred_mountpoints) const {
// Find preferred mount point
XArrayIterator<char*> iter1(mountpoints);
for (char* mountpoint; iter1.next(&mountpoint);) {
for (const char** preferred = preferred_mountpoints; *preferred != nullptr; preferred++) {
if (!strcmp(mountpoint, *preferred)) {
// Preferred mount point found
return os::strdup(mountpoint, mtGC);
}
}
}
// Preferred mount point not found
log_error_p(gc)("More than one %s filesystem found:", filesystem);
XArrayIterator<char*> iter2(mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error_p(gc)(" %s", mountpoint);
}
return nullptr;
}
char* XMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
char* path = nullptr;
XArray<char*> mountpoints;
get_mountpoints(filesystem, &mountpoints);
if (mountpoints.length() == 0) {
// No mount point found
log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.length() == 1) {
// One mount point found
path = os::strdup(mountpoints.at(0), mtGC);
} else {
// More than one mount point found
path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
}
free_mountpoints(&mountpoints);
return path;
}
const char* XMountPoint::get() const {
return _path;
}

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
#define OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
#include "gc/x/xArray.hpp"
#include "memory/allocation.hpp"
class XMountPoint : public StackObj {
private:
char* _path;
char* get_mountpoint(const char* line,
const char* filesystem) const;
void get_mountpoints(const char* filesystem,
XArray<char*>* mountpoints) const;
void free_mountpoints(XArray<char*>* mountpoints) const;
char* find_preferred_mountpoint(const char* filesystem,
XArray<char*>* mountpoints,
const char** preferred_mountpoints) const;
char* find_mountpoint(const char* filesystem,
const char** preferred_mountpoints) const;
public:
XMountPoint(const char* filesystem, const char** preferred_mountpoints);
~XMountPoint();
const char* get() const;
};
#endif // OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP

View File

@@ -1,71 +0,0 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "gc/x/xCPU.inline.hpp"
#include "gc/x/xErrno.hpp"
#include "gc/x/xNUMA.hpp"
#include "gc/x/xSyscall_linux.hpp"
#include "os_linux.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
void XNUMA::pd_initialize() {
_enabled = UseNUMA;
}
uint32_t XNUMA::count() {
if (!_enabled) {
// NUMA support not enabled
return 1;
}
return os::Linux::numa_max_node() + 1;
}
uint32_t XNUMA::id() {
if (!_enabled) {
// NUMA support not enabled
return 0;
}
return os::Linux::get_node_by_cpu(XCPU::id());
}
uint32_t XNUMA::memory_id(uintptr_t addr) {
if (!_enabled) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
uint32_t id = (uint32_t)-1;
if (XSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
XErrno err;
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
}
assert(id < count(), "Invalid NUMA id");
return id;
}

View File

@@ -1,724 +0,0 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/x/xArray.inline.hpp"
#include "gc/x/xErrno.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xLargePages.inline.hpp"
#include "gc/x/xMountPoint_linux.hpp"
#include "gc/x/xNUMA.inline.hpp"
#include "gc/x/xPhysicalMemoryBacking_linux.hpp"
#include "gc/x/xSyscall_linux.hpp"
#include "logging/log.hpp"
#include "os_linux.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <unistd.h>
//
// Support for building on older Linux systems
//
// memfd_create(2) flags
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U
#endif
#ifndef MFD_HUGE_2MB
#define MFD_HUGE_2MB 0x54000000U
#endif
// open(2) flags
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif
// fallocate(2) flags
#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE 0x01
#endif
#ifndef FALLOC_FL_PUNCH_HOLE
#define FALLOC_FL_PUNCH_HOLE 0x02
#endif
// Filesystem types, see statfs(2)
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
// Filesystem names
#define XFILESYSTEM_TMPFS "tmpfs"
#define XFILESYSTEM_HUGETLBFS "hugetlbfs"
// Proc file entry for max map count
#define XFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
// Sysfs file for transparent huge page on tmpfs
#define XFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
// Java heap filename
#define XFILENAME_HEAP "java_heap"
// Preferred tmpfs mount points, ordered by priority
static const char* z_preferred_tmpfs_mountpoints[] = {
"/dev/shm",
"/run/shm",
nullptr
};
// Preferred hugetlbfs mount points, ordered by priority
static const char* z_preferred_hugetlbfs_mountpoints[] = {
"/dev/hugepages",
"/hugepages",
nullptr
};
static int z_fallocate_hugetlbfs_attempts = 3;
static bool z_fallocate_supported = true;
XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
_fd(-1),
_filesystem(0),
_block_size(0),
_available(0),
_initialized(false) {
// Create backing file
_fd = create_fd(XFILENAME_HEAP);
if (_fd == -1) {
return;
}
// Truncate backing file
while (ftruncate(_fd, max_capacity) == -1) {
if (errno != EINTR) {
XErrno err;
log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string());
return;
}
}
// Get filesystem statistics
struct statfs buf;
if (fstatfs(_fd, &buf) == -1) {
XErrno err;
log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
return;
}
_filesystem = buf.f_type;
_block_size = buf.f_bsize;
_available = buf.f_bavail * _block_size;
log_info_p(gc, init)("Heap Backing Filesystem: %s (" UINT64_FORMAT_X ")",
is_tmpfs() ? XFILESYSTEM_TMPFS : is_hugetlbfs() ? XFILESYSTEM_HUGETLBFS : "other", _filesystem);
// Make sure the filesystem type matches requested large page type
if (XLargePages::is_transparent() && !is_tmpfs()) {
log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
XFILESYSTEM_TMPFS);
return;
}
if (XLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
XFILESYSTEM_TMPFS);
return;
}
if (XLargePages::is_explicit() && !is_hugetlbfs()) {
log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
"when using a %s filesystem", XFILESYSTEM_HUGETLBFS);
return;
}
if (!XLargePages::is_explicit() && is_hugetlbfs()) {
log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
XFILESYSTEM_HUGETLBFS);
return;
}
// Make sure the filesystem block size is compatible
if (XGranuleSize % _block_size != 0) {
log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
_block_size);
return;
}
if (is_hugetlbfs() && _block_size != XGranuleSize) {
log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
XFILESYSTEM_HUGETLBFS, _block_size, XGranuleSize);
return;
}
// Successfully initialized
_initialized = true;
}
int XPhysicalMemoryBacking::create_mem_fd(const char* name) const {
assert(XGranuleSize == 2 * M, "Granule size must match MFD_HUGE_2MB");
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s%s", name, XLargePages::is_explicit() ? ".hugetlb" : "");
// Create file
const int extra_flags = XLargePages::is_explicit() ? (MFD_HUGETLB | MFD_HUGE_2MB) : 0;
const int fd = XSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
if (fd == -1) {
XErrno err;
log_debug_p(gc, init)("Failed to create memfd file (%s)",
(XLargePages::is_explicit() && (err == EINVAL || err == ENODEV)) ?
"Hugepages (2M) not available" : err.to_string());
return -1;
}
log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename);
return fd;
}
int XPhysicalMemoryBacking::create_file_fd(const char* name) const {
const char* const filesystem = XLargePages::is_explicit()
? XFILESYSTEM_HUGETLBFS
: XFILESYSTEM_TMPFS;
const char** const preferred_mountpoints = XLargePages::is_explicit()
? z_preferred_hugetlbfs_mountpoints
: z_preferred_tmpfs_mountpoints;
// Find mountpoint
XMountPoint mountpoint(filesystem, preferred_mountpoints);
if (mountpoint.get() == nullptr) {
log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
return -1;
}
// Try to create an anonymous file using the O_TMPFILE flag. Note that this
// flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
XErrno err;
log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
(err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
struct stat stat_buf;
if (fstat(fd_anon, &stat_buf) == -1) {
XErrno err;
log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
return -1;
}
log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
return fd_anon;
}
log_debug_p(gc, init)("Falling back to open/unlink");
// Create file name
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id());
// Create file
const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd == -1) {
XErrno err;
log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string());
return -1;
}
// Unlink file
if (unlink(filename) == -1) {
XErrno err;
log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
return -1;
}
log_info_p(gc, init)("Heap Backing File: %s", filename);
return fd;
}
int XPhysicalMemoryBacking::create_fd(const char* name) const {
if (AllocateHeapAt == nullptr) {
// If the path is not explicitly specified, then we first try to create a memfd file
// instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
// not be supported at all (requires kernel >= 3.17), or it might not support large
// pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
// file on an accessible tmpfs or hugetlbfs mount point.
const int fd = create_mem_fd(name);
if (fd != -1) {
return fd;
}
log_debug_p(gc)("Falling back to searching for an accessible mount point");
}
return create_file_fd(name);
}
bool XPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}
void XPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
// Note that the available space on a tmpfs or a hugetlbfs filesystem
// will be zero if no size limit was specified when it was mounted.
if (_available == 0) {
// No size limit set, skip check
log_info_p(gc, init)("Available space on backing filesystem: N/A");
return;
}
log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
// Warn if the filesystem doesn't currently have enough space available to hold
// the max heap size. The max heap size will be capped if we later hit this limit
// when trying to expand the heap.
if (_available < max_capacity) {
log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
"(available", max_capacity / M);
log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
"size could", _available / M);
log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
}
}
void XPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
const char* const filename = XFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = os::fopen(filename, "r");
if (file == nullptr) {
// Failed to open file, skip check
log_debug_p(gc, init)("Failed to open %s", filename);
return;
}
size_t actual_max_map_count = 0;
const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
fclose(file);
if (result != 1) {
// Failed to read file, skip check
log_debug_p(gc, init)("Failed to read %s", filename);
return;
}
// The required max map count is impossible to calculate exactly since subsystems
// other than ZGC are also creating memory mappings, and we have no control over that.
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max_capacity / XGranuleSize) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
max_capacity / M, filename);
log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
"with the current", required_max_map_count, actual_max_map_count);
log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
}
}
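As a worked example of the estimate above: with a 16G max heap and 2M granules, (16G / 2M) * 3 * 1.2 = 8192 * 3.6, i.e. roughly 29491 required mappings, which fits comfortably under the common Linux default vm.max_map_count of 65530 but would trigger this warning on systems configured with a much lower limit.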
void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Warn if available space is too low
warn_available_space(max_capacity);
// Warn if max map count is too low
warn_max_map_count(max_capacity);
}
bool XPhysicalMemoryBacking::is_tmpfs() const {
return _filesystem == TMPFS_MAGIC;
}
bool XPhysicalMemoryBacking::is_hugetlbfs() const {
return _filesystem == HUGETLBFS_MAGIC;
}
bool XPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
// If the shmem_enabled file exists and is readable then we
// know the kernel supports transparent huge pages for tmpfs.
return access(XFILENAME_SHMEM_ENABLED, R_OK) == 0;
}
XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
// On hugetlbfs, mapping a file segment will fail immediately, without
// the need to touch the mapped pages first, if there aren't enough huge
// pages available to back the mapping.
void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr == MAP_FAILED) {
// Failed
return errno;
}
// Once mapped, the huge pages are only reserved. We need to touch them
// to associate them with the file segment. Note that we can not punch
// hole in file segments which only have reserved pages.
if (touch) {
char* const start = (char*)addr;
char* const end = start + length;
os::pretouch_memory(start, end, _block_size);
}
// Unmap again. From now on, the huge pages that were mapped are allocated
// to this file. There's no risk of getting a SIGBUS when mapping and
// touching these pages again.
if (munmap(addr, length) == -1) {
// Failed
return errno;
}
// Success
return 0;
}
static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
char* const start = (char*)addr;
char* const end = start + length;
// Touching a mapping that can't be backed by memory will generate a
// SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and
// handled. On tmpfs, doing a fetch (rather than a store) is enough
// to cause backing pages to be allocated (there's no zero-page to
// worry about).
for (char *p = start; p < end; p += page_size) {
if (SafeFetch32((int*)p, -1) == -1) {
// Failed
return false;
}
}
// Success
return true;
}
XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
// On tmpfs, we need to touch the mapped pages to figure out
// if there are enough pages available to back the mapping.
void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr == MAP_FAILED) {
// Failed
return errno;
}
// Advise mapping to use transparent huge pages
os::realign_memory((char*)addr, length, XGranuleSize);
// Touch the mapping (safely) to make sure it's backed by memory
const bool backed = safe_touch_mapping(addr, length, _block_size);
// Unmap again. If successfully touched, the backing memory will
// be allocated to this file. There's no risk of getting a SIGBUS
// when mapping and touching these pages again.
if (munmap(addr, length) == -1) {
// Failed
return errno;
}
// Success
return backed ? 0 : ENOMEM;
}
XErrno XPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
uint8_t data = 0;
// Allocate backing memory by writing to each block
for (size_t pos = offset; pos < offset + length; pos += _block_size) {
if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
// Failed
return errno;
}
}
// Success
return 0;
}
XErrno XPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
// (for tmpfs without transparent huge pages and other filesystem types).
if (XLargePages::is_explicit()) {
return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */);
} else if (XLargePages::is_transparent()) {
return fallocate_compat_mmap_tmpfs(offset, length);
} else {
return fallocate_compat_pwrite(offset, length);
}
}
XErrno XPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
const int mode = 0; // Allocate
const int res = XSyscall::fallocate(_fd, mode, offset, length);
if (res == -1) {
// Failed
return errno;
}
// Success
return 0;
}
XErrno XPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
// Using compat mode is more efficient when allocating space on hugetlbfs.
// Note that allocating huge pages this way will only reserve them, and not
// associate them with segments of the file. We must guarantee that we at
// some point touch these segments, otherwise we can not punch hole in them.
// Also note that we need to use compat mode when using transparent huge pages,
// since we need to use madvise(2) on the mapping before the page is allocated.
if (z_fallocate_supported && !XLargePages::is_enabled()) {
const XErrno err = fallocate_fill_hole_syscall(offset, length);
if (!err) {
// Success
return 0;
}
if (err != ENOSYS && err != EOPNOTSUPP) {
// Failed
return err;
}
// Not supported
log_debug_p(gc)("Falling back to fallocate() compatibility mode");
z_fallocate_supported = false;
}
return fallocate_fill_hole_compat(offset, length);
}
XErrno XPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
if (XLargePages::is_explicit()) {
// We can only punch hole in pages that have been touched. Non-touched
// pages are only reserved, and not associated with any specific file
// segment. We don't know which pages have been previously touched, so
// we always touch them here to guarantee that we can punch hole.
const XErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
if (err) {
// Failed
return err;
}
}
const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
if (XSyscall::fallocate(_fd, mode, offset, length) == -1) {
// Failed
return errno;
}
// Success
return 0;
}
XErrno XPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
// Try first half
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, _block_size);
const XErrno err0 = fallocate(punch_hole, offset0, length0);
if (err0) {
return err0;
}
// Try second half
const size_t offset1 = offset0 + length0;
const size_t length1 = length - length0;
const XErrno err1 = fallocate(punch_hole, offset1, length1);
if (err1) {
return err1;
}
// Success
return 0;
}
XErrno XPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
assert(is_aligned(offset, _block_size), "Invalid offset");
assert(is_aligned(length, _block_size), "Invalid length");
const XErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
if (err == EINTR && length > _block_size) {
// Calling fallocate(2) with a large length can take a long time to
// complete. When running profilers, such as VTune, this syscall will
// be constantly interrupted by signals. Expanding the file in smaller
// steps avoids this problem.
return split_and_fallocate(punch_hole, offset, length);
}
return err;
}
bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
retry:
const XErrno err = fallocate(false /* punch_hole */, offset, length);
if (err) {
if (err == ENOSPC && !is_init_completed() && XLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
// If we fail to allocate during initialization, due to lack of space on
// the hugetlbfs filesystem, then we wait and retry a few times before
// giving up. Otherwise there is a risk that running JVMs back-to-back
// will fail, since there is a delay between process termination and the
// huge pages owned by that process being returned to the huge page pool
// and made available for new allocations.
log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
// Wait and retry in one second, in the hope that huge pages will be
// available by then.
sleep(1);
goto retry;
}
// Failed
log_error_p(gc)("Failed to commit memory (%s)", err.to_string());
return false;
}
// Success
return true;
}
static int offset_to_node(size_t offset) {
const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length();
return mapping->at((int)nindex);
}
size_t XPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
size_t committed = 0;
// Commit one granule at a time, so that each granule
// can be allocated from a different preferred node.
while (committed < length) {
const size_t granule_offset = offset + committed;
// Setup NUMA policy to allocate memory from a preferred node
os::Linux::numa_set_preferred(offset_to_node(granule_offset));
if (!commit_inner(granule_offset, XGranuleSize)) {
// Failed
break;
}
committed += XGranuleSize;
}
// Restore NUMA policy
os::Linux::numa_set_preferred(-1);
return committed;
}
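A small worked example, assuming four NUMA nodes and an identity nindex-to-node mapping: granules at offsets 0, 2M, 4M, 6M and 8M get preferred nodes 0, 1, 2, 3 and 0 respectively, so the physical backing is interleaved across the nodes at granule granularity.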
size_t XPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
return length;
}
// Failed, try to commit as much as possible
size_t start = offset;
size_t end = offset + length;
for (;;) {
length = align_down((end - start) / 2, XGranuleSize);
if (length < XGranuleSize) {
// Done, don't commit more
return start - offset;
}
if (commit_inner(start, length)) {
// Success, try commit more
start += length;
} else {
// Failed, try commit less
end -= length;
}
}
}
size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
if (XNUMA::is_enabled() && !XLargePages::is_explicit()) {
// To get granule-level NUMA interleaving when using non-large pages,
// we must explicitly interleave the memory at commit/fallocate time.
return commit_numa_interleaved(offset, length);
}
return commit_default(offset, length);
}
size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
const XErrno err = fallocate(true /* punch_hole */, offset, length);
if (err) {
log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
return 0;
}
return length;
}
void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
if (res == MAP_FAILED) {
XErrno err;
fatal("Failed to map memory (%s)", err.to_string());
}
}
void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
XErrno err;
fatal("Failed to map memory (%s)", err.to_string());
}
}

View File

@@ -1,77 +0,0 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
#define OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
class XErrno;
class XPhysicalMemoryBacking {
private:
int _fd;
size_t _size;
uint64_t _filesystem;
size_t _block_size;
size_t _available;
bool _initialized;
void warn_available_space(size_t max_capacity) const;
void warn_max_map_count(size_t max_capacity) const;
int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
int create_fd(const char* name) const;
bool is_tmpfs() const;
bool is_hugetlbfs() const;
bool tmpfs_supports_transparent_huge_pages() const;
XErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
XErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
XErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
XErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
XErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
XErrno fallocate_fill_hole(size_t offset, size_t length) const;
XErrno fallocate_punch_hole(size_t offset, size_t length) const;
XErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
XErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
bool commit_inner(size_t offset, size_t length) const;
size_t commit_numa_interleaved(size_t offset, size_t length) const;
size_t commit_default(size_t offset, size_t length) const;
public:
XPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
void warn_commit_limits(size_t max_capacity) const;
size_t commit(size_t offset, size_t length) const;
size_t uncommit(size_t offset, size_t length) const;
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
};
#endif // OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP

View File

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xSyscall_linux.hpp"
#include OS_CPU_HEADER(gc/x/xSyscall)
#include <unistd.h>
int XSyscall::memfd_create(const char *name, unsigned int flags) {
return syscall(SYS_memfd_create, name, flags);
}
int XSyscall::fallocate(int fd, int mode, size_t offset, size_t length) {
return syscall(SYS_fallocate, fd, mode, offset, length);
}
long XSyscall::get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags) {
return syscall(SYS_get_mempolicy, mode, nodemask, maxnode, addr, flags);
}

View File

@@ -1,45 +0,0 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
#define OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
// Flags for get_mempolicy()
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0)
#endif
#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1)
#endif
class XSyscall : public AllStatic {
public:
static int memfd_create(const char* name, unsigned int flags);
static int fallocate(int fd, int mode, size_t offset, size_t length);
static long get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags);
};
#endif // OS_LINUX_GC_X_XSYSCALL_LINUX_HPP

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xArguments.hpp"
bool XArguments::is_os_supported() {
return true;
}

View File

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xInitialize.hpp"
void XInitialize::pd_initialize() {
// Does nothing
}

View File

@@ -1,43 +0,0 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xUtils.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include <stdlib.h>
uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) {
void* res = nullptr;
// Use raw posix_memalign as long as we have no wrapper for it
ALLOW_C_FUNCTION(::posix_memalign, int rc = posix_memalign(&res, alignment, size);)
if (rc != 0) {
fatal("posix_memalign() failed");
}
memset(res, 0, size);
return (uintptr_t)res;
}

View File

@@ -1,60 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xAddress.inline.hpp"
#include "gc/x/xVirtualMemory.hpp"
#include "logging/log.hpp"
#include <sys/mman.h>
#include <sys/types.h>
void XVirtualMemoryManager::pd_initialize_before_reserve() {
// Does nothing
}
void XVirtualMemoryManager::pd_initialize_after_reserve() {
// Does nothing
}
bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (res == (uintptr_t)MAP_FAILED) {
// Failed to reserve memory
return false;
}
if (res != addr) {
// Failed to reserve memory at the requested address
munmap((void*)res, size);
return false;
}
// Success
return true;
}
void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
const int res = munmap((void*)addr, size);
assert(res == 0, "Failed to unmap memory");
}

View File

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xArguments.hpp"
#include "gc/x/xSyscall_windows.hpp"
bool XArguments::is_os_supported() {
return XSyscall::is_supported();
}

View File

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xInitialize.hpp"
#include "gc/x/xSyscall_windows.hpp"
void XInitialize::pd_initialize() {
XSyscall::initialize();
}

View File

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/x/xLargePages.hpp"
#include "gc/x/xSyscall_windows.hpp"
#include "runtime/globals.hpp"
void XLargePages::pd_initialize() {
if (UseLargePages) {
if (XSyscall::is_large_pages_supported()) {
_state = Explicit;
return;
}
log_info_p(gc, init)("Shared large pages not supported on this OS version");
}
_state = Disabled;
}

View File

@@ -1,310 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xMapper_windows.hpp"
#include "gc/x/xSyscall_windows.hpp"
#include "logging/log.hpp"
#include "utilities/debug.hpp"
#include <Windows.h>
// Memory reservation, commit, views, and placeholders.
//
// To be able to up-front reserve address space for the heap views, and later
// multi-map the heap views to the same physical memory, without ever losing the
// reservation of the reserved address space, we use "placeholders".
//
// These placeholders block out the address space from being used by other parts
// of the process. To commit memory in this address space, the placeholder must
// be replaced by anonymous memory, or replaced by mapping a view against a
// paging file mapping. We use the latter to support multi-mapping.
//
// We want to be able to dynamically commit and uncommit the physical memory of
// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is
// no way to grow and shrink the committed memory of a paging file mapping.
// Therefore, we create multiple granule-sized paging file mappings. The memory is
// committed by creating a paging file mapping, mapping a view against it, committing
// the memory, and unmapping the view. The memory will stay committed until all views
// are unmapped and the paging file mapping handle is closed.
//
// When replacing a placeholder address space reservation with a mapped view
// against a paging file mapping, the virtual address space must exactly match
// an existing placeholder's address and size. Therefore we only deal with
// granule-sized placeholders at this layer. Higher layers that keep track of
// reserved available address space can (and will) coalesce placeholders, but
// they will be split before being used.
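Sketching how these primitives compose (an illustration, not the exact ZGC call sequence): reserve() claims the whole heap range as one placeholder; split_placeholder() carves out a granule-sized placeholder; create_and_commit_paging_file_mapping() produces a handle to committed backing memory; and map_view_replace_placeholder() maps that memory into the carved-out slot. Since the Windows APIs can unmap a view while preserving a placeholder (MEM_PRESERVE_PLACEHOLDER), the address-space reservation survives repeated map/unmap cycles.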
#define fatal_error(msg, addr, size) \
fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \
(addr), (size) / M, GetLastError())
uintptr_t XMapper::reserve(uintptr_t addr, size_t size) {
void* const res = XSyscall::VirtualAlloc2(
GetCurrentProcess(), // Process
(void*)addr, // BaseAddress
size, // Size
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType
PAGE_NOACCESS, // PageProtection
nullptr, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
void XMapper::unreserve(uintptr_t addr, size_t size) {
const bool res = XSyscall::VirtualFreeEx(
GetCurrentProcess(), // hProcess
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE // dwFreeType
);
if (!res) {
fatal_error("Failed to unreserve memory", addr, size);
}
}
HANDLE XMapper::create_paging_file_mapping(size_t size) {
// Create mapping with SEC_RESERVE instead of SEC_COMMIT.
//
// We use MapViewOfFile3 for two different reasons:
// 1) When committing memory for the created paging file
// 2) When mapping a view of the memory created in (1)
//
// The non-platform code is only set up to deal with out-of-memory
// errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3
// from failing because of "commit limit" checks. To actually commit
// memory in (1), a call to VirtualAlloc2 is done.
HANDLE const res = XSyscall::CreateFileMappingW(
INVALID_HANDLE_VALUE, // hFile
nullptr, // lpFileMappingAttribute
PAGE_READWRITE | SEC_RESERVE, // flProtect
size >> 32, // dwMaximumSizeHigh
size & 0xFFFFFFFF, // dwMaximumSizeLow
nullptr // lpName
);
// Caller responsible for error handling
return res;
}
bool XMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) {
const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size);
if (addr == 0) {
log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError());
return false;
}
const uintptr_t res = commit(addr, size);
if (res != addr) {
log_error(gc)("Failed to commit memory (%d)", GetLastError());
}
unmap_view_no_placeholder(addr, size);
return res == addr;
}
uintptr_t XMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) {
void* const res = XSyscall::MapViewOfFile3(
file_handle, // FileMapping
GetCurrentProcess(), // ProcessHandle
nullptr, // BaseAddress
file_offset, // Offset
size, // ViewSize
0, // AllocationType
PAGE_NOACCESS, // PageProtection
nullptr, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
void XMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) {
const bool res = XSyscall::UnmapViewOfFile2(
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
0 // UnmapFlags
);
if (!res) {
fatal_error("Failed to unmap memory", addr, size);
}
}
uintptr_t XMapper::commit(uintptr_t addr, size_t size) {
void* const res = XSyscall::VirtualAlloc2(
GetCurrentProcess(), // Process
(void*)addr, // BaseAddress
size, // Size
MEM_COMMIT, // AllocationType
PAGE_NOACCESS, // PageProtection
nullptr, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
HANDLE XMapper::create_and_commit_paging_file_mapping(size_t size) {
HANDLE const file_handle = create_paging_file_mapping(size);
if (file_handle == 0) {
log_error(gc)("Failed to create paging file mapping (%d)", GetLastError());
return 0;
}
const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size);
if (!res) {
close_paging_file_mapping(file_handle);
return 0;
}
return file_handle;
}
void XMapper::close_paging_file_mapping(HANDLE file_handle) {
const bool res = CloseHandle(
file_handle // hObject
);
if (!res) {
fatal("Failed to close paging file handle (%d)", GetLastError());
}
}
HANDLE XMapper::create_shared_awe_section() {
MEM_EXTENDED_PARAMETER parameter = { 0 };
parameter.Type = MemSectionExtendedParameterUserPhysicalFlags;
parameter.ULong64 = 0;
HANDLE section = XSyscall::CreateFileMapping2(
INVALID_HANDLE_VALUE, // File
nullptr, // SecurityAttributes
SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess
PAGE_READWRITE, // PageProtection
SEC_RESERVE | SEC_LARGE_PAGES, // AllocationAttributes
0, // MaximumSize
nullptr, // Name
&parameter, // ExtendedParameters
1 // ParameterCount
);
if (section == nullptr) {
fatal("Could not create shared AWE section (%d)", GetLastError());
}
return section;
}
uintptr_t XMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) {
MEM_EXTENDED_PARAMETER parameter = { 0 };
parameter.Type = MemExtendedParameterUserPhysicalHandle;
parameter.Handle = awe_section;
void* const res = XSyscall::VirtualAlloc2(
GetCurrentProcess(), // Process
(void*)addr, // BaseAddress
size, // Size
MEM_RESERVE | MEM_PHYSICAL, // AllocationType
PAGE_READWRITE, // PageProtection
&parameter, // ExtendedParameters
1 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
void XMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) {
bool res = VirtualFree(
(void*)addr, // lpAddress
0, // dwSize
MEM_RELEASE // dwFreeType
);
if (!res) {
fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
addr, size / M, GetLastError());
}
}
void XMapper::split_placeholder(uintptr_t addr, size_t size) {
const bool res = VirtualFree(
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType
);
if (!res) {
fatal_error("Failed to split placeholder", addr, size);
}
}
void XMapper::coalesce_placeholders(uintptr_t addr, size_t size) {
const bool res = VirtualFree(
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType
);
if (!res) {
fatal_error("Failed to coalesce placeholders", addr, size);
}
}
void XMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) {
void* const res = XSyscall::MapViewOfFile3(
file_handle, // FileMapping
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
file_offset, // Offset
size, // ViewSize
MEM_REPLACE_PLACEHOLDER, // AllocationType
PAGE_READWRITE, // PageProtection
nullptr, // ExtendedParameters
0 // ParameterCount
);
if (res == nullptr) {
fatal_error("Failed to map memory", addr, size);
}
}
void XMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) {
const bool res = XSyscall::UnmapViewOfFile2(
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
MEM_PRESERVE_PLACEHOLDER // UnmapFlags
);
if (!res) {
fatal_error("Failed to unmap memory", addr, size);
}
}

View File

@ -1,94 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
#define OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
#include <Windows.h>
class XMapper : public AllStatic {
private:
// Create paging file mapping
static HANDLE create_paging_file_mapping(size_t size);
// Commit paging file mapping
static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size);
// Map a view anywhere without a placeholder
static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size);
// Unmap a view without preserving a placeholder
static void unmap_view_no_placeholder(uintptr_t addr, size_t size);
// Commit memory covering the given virtual address range
static uintptr_t commit(uintptr_t addr, size_t size);
public:
// Reserve memory with a placeholder
static uintptr_t reserve(uintptr_t addr, size_t size);
// Unreserve memory
static void unreserve(uintptr_t addr, size_t size);
// Create and commit paging file mapping
static HANDLE create_and_commit_paging_file_mapping(size_t size);
// Close paging file mapping
static void close_paging_file_mapping(HANDLE file_handle);
// Create a shared AWE section
static HANDLE create_shared_awe_section();
// Reserve memory attached to the shared AWE section
static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size);
// Unreserve memory attached to a shared AWE section
static void unreserve_for_shared_awe(uintptr_t addr, size_t size);
// Split a placeholder
//
// A view can only replace an entire placeholder, so placeholders need to be
// split and coalesced to be the exact size of the new views.
// [addr, addr + size) needs to be a proper sub-placeholder of an existing
// placeholder.
static void split_placeholder(uintptr_t addr, size_t size);
// Coalesce a placeholder
//
// [addr, addr + size) is the new placeholder. A sub-placeholder needs to
// exist within that range.
static void coalesce_placeholders(uintptr_t addr, size_t size);
// Map a view of the file handle and replace the placeholder covering the
// given virtual address range
static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size);
// Unmap the view and reinstate a placeholder covering the given virtual
// address range
static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size);
};
#endif // OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
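// ---------------------------------------------------------------------------
// Illustrative sketch of the placeholder protocol documented above, written
// against the raw Windows APIs rather than the XMapper wrappers. Assumptions:
// a Windows 1803+ SDK (onecore.lib), GRANULE standing in for XGranuleSize, and
// file_mapping being a committed paging file mapping of at least one granule.
#include <windows.h>

static bool placeholder_roundtrip(HANDLE file_mapping) {
  const SIZE_T GRANULE = 2 * 1024 * 1024;

  // Reserve a two-granule placeholder
  char* const base = (char*)VirtualAlloc2(GetCurrentProcess(), nullptr,
                                          2 * GRANULE,
                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                          PAGE_NOACCESS, nullptr, 0);
  if (base == nullptr) {
    return false;
  }

  // Split it, so the first granule is covered by its own placeholder
  VirtualFree(base, GRANULE, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

  // A view can only replace an entire placeholder, so map exactly one granule
  void* const view = MapViewOfFile3(file_mapping, GetCurrentProcess(), base,
                                    0, GRANULE, MEM_REPLACE_PLACEHOLDER,
                                    PAGE_READWRITE, nullptr, 0);
  if (view == nullptr) {
    VirtualFree(base, 2 * GRANULE, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
    VirtualFree(base, 0, MEM_RELEASE);
    return false;
  }

  // Unmap while reinstating the placeholder, then coalesce back into one
  UnmapViewOfFile2(GetCurrentProcess(), base, MEM_PRESERVE_PLACEHOLDER);
  VirtualFree(base, 2 * GRANULE, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);

  // Release the whole reservation
  VirtualFree(base, 0, MEM_RELEASE);
  return true;
}
// ---------------------------------------------------------------------------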

View File

@ -1,42 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xNUMA.hpp"
void XNUMA::pd_initialize() {
_enabled = false;
}
uint32_t XNUMA::count() {
return 1;
}
uint32_t XNUMA::id() {
return 0;
}
uint32_t XNUMA::memory_id(uintptr_t addr) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}

View File

@ -1,252 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xGranuleMap.inline.hpp"
#include "gc/x/xLargePages.inline.hpp"
#include "gc/x/xMapper_windows.hpp"
#include "gc/x/xPhysicalMemoryBacking_windows.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
class XPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
public:
virtual size_t commit(size_t offset, size_t size) = 0;
virtual size_t uncommit(size_t offset, size_t size) = 0;
virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0;
virtual void unmap(uintptr_t addr, size_t size) const = 0;
};
// Implements small pages (paged) support using placeholder reservation.
//
// The backing commits and uncommits physical memory, which can be
// multi-mapped into the virtual address space. To support fine-grained
// committing and uncommitting, each XGranuleSize'd chunk is mapped to
// a separate paging file mapping.
class XPhysicalMemoryBackingSmallPages : public XPhysicalMemoryBackingImpl {
private:
XGranuleMap<HANDLE> _handles;
HANDLE get_handle(uintptr_t offset) const {
HANDLE const handle = _handles.get(offset);
assert(handle != 0, "Should be set");
return handle;
}
void put_handle(uintptr_t offset, HANDLE handle) {
assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
assert(_handles.get(offset) == 0, "Should be cleared");
_handles.put(offset, handle);
}
void clear_handle(uintptr_t offset) {
assert(_handles.get(offset) != 0, "Should be set");
_handles.put(offset, 0);
}
public:
XPhysicalMemoryBackingSmallPages(size_t max_capacity) :
XPhysicalMemoryBackingImpl(),
_handles(max_capacity) {}
size_t commit(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += XGranuleSize) {
HANDLE const handle = XMapper::create_and_commit_paging_file_mapping(XGranuleSize);
if (handle == 0) {
return i;
}
put_handle(offset + i, handle);
}
return size;
}
size_t uncommit(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += XGranuleSize) {
HANDLE const handle = get_handle(offset + i);
clear_handle(offset + i);
XMapper::close_paging_file_mapping(handle);
}
return size;
}
void map(uintptr_t addr, size_t size, size_t offset) const {
assert(is_aligned(offset, XGranuleSize), "Misaligned");
assert(is_aligned(addr, XGranuleSize), "Misaligned");
assert(is_aligned(size, XGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += XGranuleSize) {
HANDLE const handle = get_handle(offset + i);
XMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, XGranuleSize);
}
}
void unmap(uintptr_t addr, size_t size) const {
assert(is_aligned(addr, XGranuleSize), "Misaligned");
assert(is_aligned(size, XGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += XGranuleSize) {
XMapper::unmap_view_preserve_placeholder(addr + i, XGranuleSize);
}
}
};
// Implements Large Pages (locked) support using shared AWE physical memory.
//
// Shared AWE physical memory also works with small pages, but it has
// a few drawbacks that makes it a no-go to use it at this point:
//
// 1) It seems to use 8 bytes of committed memory per *reserved* memory.
// Given our scheme to use a large address space range this turns out to
// use too much memory.
//
// 2) It requires memory locking privileges, even for small pages. This
// has always been a requirement for large pages, and would be an extra
// restriction for usage with small pages.
//
// Note: The large page size is tied to our XGranuleSize.
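// ---------------------------------------------------------------------------
// Illustrative sketch of the classic AWE allocate/map/unmap flow that the
// class below builds on. The real code hands the shared AWE section handle
// (created with CreateFileMapping2) to these calls so the same physical pages
// can back every heap view; this minimal, process-local variant assumes
// SeLockMemoryPrivilege is held and bytes is a multiple of the page size.
#include <windows.h>
#include <stdlib.h>

static bool awe_roundtrip(SIZE_T bytes) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  ULONG_PTR npages = bytes / si.dwPageSize;

  // Reserve address space that may receive physical page mappings
  void* const addr = VirtualAlloc(nullptr, bytes, MEM_RESERVE | MEM_PHYSICAL,
                                  PAGE_READWRITE);
  if (addr == nullptr) {
    return false;
  }

  // "Commit": allocate physical page frames (may return fewer than requested)
  ULONG_PTR* const frames = (ULONG_PTR*)malloc(npages * sizeof(ULONG_PTR));
  ULONG_PTR allocated = npages;
  if (frames == nullptr ||
      !AllocateUserPhysicalPages(GetCurrentProcess(), &allocated, frames)) {
    free(frames);
    VirtualFree(addr, 0, MEM_RELEASE);
    return false;
  }

  // "Map": attach the frames to the reservation, then "unmap" by passing null
  const bool ok = MapUserPhysicalPages(addr, allocated, frames) &&
                  MapUserPhysicalPages(addr, allocated, nullptr);

  // "Uncommit" and tear down
  FreeUserPhysicalPages(GetCurrentProcess(), &allocated, frames);
  free(frames);
  VirtualFree(addr, 0, MEM_RELEASE);
  return ok;
}
// ---------------------------------------------------------------------------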
extern HANDLE XAWESection;
class XPhysicalMemoryBackingLargePages : public XPhysicalMemoryBackingImpl {
private:
ULONG_PTR* const _page_array;
static ULONG_PTR* alloc_page_array(size_t max_capacity) {
const size_t npages = max_capacity / XGranuleSize;
const size_t array_size = npages * sizeof(ULONG_PTR);
return (ULONG_PTR*)os::malloc(array_size, mtGC);
}
public:
XPhysicalMemoryBackingLargePages(size_t max_capacity) :
XPhysicalMemoryBackingImpl(),
_page_array(alloc_page_array(max_capacity)) {}
size_t commit(size_t offset, size_t size) {
const size_t index = offset >> XGranuleSizeShift;
const size_t npages = size >> XGranuleSizeShift;
size_t npages_res = npages;
const bool res = AllocateUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]);
if (!res) {
fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
size / M, offset, GetLastError());
} else {
log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset);
}
// AllocateUserPhysicalPages might not be able to allocate the requested amount of memory.
// The number of pages actually allocated is written to npages_res.
return npages_res << XGranuleSizeShift;
}
size_t uncommit(size_t offset, size_t size) {
const size_t index = offset >> XGranuleSizeShift;
const size_t npages = size >> XGranuleSizeShift;
size_t npages_res = npages;
const bool res = FreeUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]);
if (!res) {
fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
size / M, offset, GetLastError());
}
return npages_res << XGranuleSizeShift;
}
void map(uintptr_t addr, size_t size, size_t offset) const {
const size_t npages = size >> XGranuleSizeShift;
const size_t index = offset >> XGranuleSizeShift;
const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]);
if (!res) {
fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
addr, size / M, offset, GetLastError());
}
}
void unmap(uintptr_t addr, size_t size) const {
const size_t npages = size >> XGranuleSizeShift;
const bool res = MapUserPhysicalPages((char*)addr, npages, nullptr);
if (!res) {
fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
addr, size / M, GetLastError());
}
}
};
static XPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
if (XLargePages::is_enabled()) {
return new XPhysicalMemoryBackingLargePages(max_capacity);
}
return new XPhysicalMemoryBackingSmallPages(max_capacity);
}
XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
_impl(select_impl(max_capacity)) {}
bool XPhysicalMemoryBacking::is_initialized() const {
return true;
}
void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}
size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
return _impl->commit(offset, length);
}
size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
return _impl->uncommit(offset, length);
}
void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
assert(is_aligned(offset, XGranuleSize), "Misaligned: " PTR_FORMAT, offset);
assert(is_aligned(addr, XGranuleSize), "Misaligned: " PTR_FORMAT, addr);
assert(is_aligned(size, XGranuleSize), "Misaligned: " PTR_FORMAT, size);
_impl->map(addr, size, offset);
}
void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
assert(is_aligned(addr, XGranuleSize), "Misaligned");
assert(is_aligned(size, XGranuleSize), "Misaligned");
_impl->unmap(addr, size);
}

View File

@ -1,51 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP
#define OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP
#include "utilities/globalDefinitions.hpp"
#include <Windows.h>
class XPhysicalMemoryBackingImpl;
class XPhysicalMemoryBacking {
private:
XPhysicalMemoryBackingImpl* _impl;
public:
XPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
void warn_commit_limits(size_t max_capacity) const;
size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
void map(uintptr_t addr, size_t size, size_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
};
#endif // OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP

View File

@ -1,93 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/x/xSyscall_windows.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
XSyscall::CreateFileMappingWFn XSyscall::CreateFileMappingW;
XSyscall::CreateFileMapping2Fn XSyscall::CreateFileMapping2;
XSyscall::VirtualAlloc2Fn XSyscall::VirtualAlloc2;
XSyscall::VirtualFreeExFn XSyscall::VirtualFreeEx;
XSyscall::MapViewOfFile3Fn XSyscall::MapViewOfFile3;
XSyscall::UnmapViewOfFile2Fn XSyscall::UnmapViewOfFile2;
static void* lookup_kernelbase_library() {
const char* const name = "KernelBase";
char ebuf[1024];
void* const handle = os::dll_load(name, ebuf, sizeof(ebuf));
if (handle == nullptr) {
log_error_p(gc)("Failed to load library: %s", name);
}
return handle;
}
static void* lookup_kernelbase_symbol(const char* name) {
static void* const handle = lookup_kernelbase_library();
if (handle == nullptr) {
return nullptr;
}
return os::dll_lookup(handle, name);
}
static bool has_kernelbase_symbol(const char* name) {
return lookup_kernelbase_symbol(name) != nullptr;
}
template <typename Fn>
static void install_kernelbase_symbol(Fn*& fn, const char* name) {
fn = reinterpret_cast<Fn*>(lookup_kernelbase_symbol(name));
}
template <typename Fn>
static void install_kernelbase_1803_symbol_or_exit(Fn*& fn, const char* name) {
install_kernelbase_symbol(fn, name);
if (fn == nullptr) {
log_error_p(gc)("Failed to lookup symbol: %s", name);
vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
}
}
void XSyscall::initialize() {
// Required
install_kernelbase_1803_symbol_or_exit(CreateFileMappingW, "CreateFileMappingW");
install_kernelbase_1803_symbol_or_exit(VirtualAlloc2, "VirtualAlloc2");
install_kernelbase_1803_symbol_or_exit(VirtualFreeEx, "VirtualFreeEx");
install_kernelbase_1803_symbol_or_exit(MapViewOfFile3, "MapViewOfFile3");
install_kernelbase_1803_symbol_or_exit(UnmapViewOfFile2, "UnmapViewOfFile2");
// Optional - for large pages support
install_kernelbase_symbol(CreateFileMapping2, "CreateFileMapping2");
}
bool XSyscall::is_supported() {
// Available in Windows version 1803 and later
return has_kernelbase_symbol("VirtualAlloc2");
}
bool XSyscall::is_large_pages_supported() {
// Available in Windows version 1809 and later
return has_kernelbase_symbol("CreateFileMapping2");
}
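// ---------------------------------------------------------------------------
// Illustrative sketch of the same runtime probing done with plain Win32 calls
// instead of the os::dll_load/os::dll_lookup wrappers used above. Assumptions:
// KernelBase.dll is already loaded (true for Win32 processes in practice) and
// the SDK is recent enough to declare MEM_EXTENDED_PARAMETER.
#include <windows.h>
#include <stdio.h>

typedef PVOID (WINAPI* VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                        MEM_EXTENDED_PARAMETER*, ULONG);

int main() {
  HMODULE const kernelbase = GetModuleHandleW(L"KernelBase.dll");
  if (kernelbase == nullptr) {
    printf("KernelBase.dll not loaded\n");
    return 1;
  }
  VirtualAlloc2Fn const virtual_alloc2 =
      (VirtualAlloc2Fn)GetProcAddress(kernelbase, "VirtualAlloc2");
  const bool large_pages =
      GetProcAddress(kernelbase, "CreateFileMapping2") != nullptr;
  printf("VirtualAlloc2 (1803+): %s, CreateFileMapping2 (1809+): %s\n",
         virtual_alloc2 != nullptr ? "available" : "missing",
         large_pages ? "available" : "missing");
  return 0;
}
// ---------------------------------------------------------------------------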

View File

@ -1,55 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP
#define OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP
#include "utilities/globalDefinitions.hpp"
#include <Windows.h>
#include <Memoryapi.h>
class XSyscall {
private:
typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR);
typedef HANDLE (*CreateFileMapping2Fn)(HANDLE, LPSECURITY_ATTRIBUTES, ULONG, ULONG, ULONG, ULONG64, PCWSTR, PMEM_EXTENDED_PARAMETER, ULONG);
typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD);
typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG);
public:
static CreateFileMappingWFn CreateFileMappingW;
static CreateFileMapping2Fn CreateFileMapping2;
static VirtualAlloc2Fn VirtualAlloc2;
static VirtualFreeExFn VirtualFreeEx;
static MapViewOfFile3Fn MapViewOfFile3;
static UnmapViewOfFile2Fn UnmapViewOfFile2;
static void initialize();
static bool is_supported();
static bool is_large_pages_supported();
};
#endif // OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xUtils.hpp"
#include "utilities/debug.hpp"
#include <malloc.h>
uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) {
void* const res = _aligned_malloc(size, alignment);
if (res == nullptr) {
fatal("_aligned_malloc failed");
}
memset(res, 0, size);
return (uintptr_t)res;
}

View File

@ -1,195 +0,0 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xAddress.inline.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xLargePages.inline.hpp"
#include "gc/x/xMapper_windows.hpp"
#include "gc/x/xSyscall_windows.hpp"
#include "gc/x/xVirtualMemory.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
class XVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
public:
virtual void initialize_before_reserve() {}
virtual void initialize_after_reserve(XMemoryManager* manager) {}
virtual bool reserve(uintptr_t addr, size_t size) = 0;
virtual void unreserve(uintptr_t addr, size_t size) = 0;
};
// Implements small pages (paged) support using placeholder reservation.
class XVirtualMemoryManagerSmallPages : public XVirtualMemoryManagerImpl {
private:
class PlaceholderCallbacks : public AllStatic {
public:
static void split_placeholder(uintptr_t start, size_t size) {
XMapper::split_placeholder(XAddress::marked0(start), size);
XMapper::split_placeholder(XAddress::marked1(start), size);
XMapper::split_placeholder(XAddress::remapped(start), size);
}
static void coalesce_placeholders(uintptr_t start, size_t size) {
XMapper::coalesce_placeholders(XAddress::marked0(start), size);
XMapper::coalesce_placeholders(XAddress::marked1(start), size);
XMapper::coalesce_placeholders(XAddress::remapped(start), size);
}
static void split_into_placeholder_granules(uintptr_t start, size_t size) {
for (uintptr_t addr = start; addr < start + size; addr += XGranuleSize) {
split_placeholder(addr, XGranuleSize);
}
}
static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
if (size > XGranuleSize) {
coalesce_placeholders(start, size);
}
}
static void create_callback(const XMemory* area) {
assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size());
}
static void destroy_callback(const XMemory* area) {
assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
// Don't try to split the last granule - VirtualFree will fail
split_into_placeholder_granules(area->start(), area->size() - XGranuleSize);
}
static void shrink_from_front_callback(const XMemory* area, size_t size) {
assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
split_into_placeholder_granules(area->start(), size);
}
static void shrink_from_back_callback(const XMemory* area, size_t size) {
assert(is_aligned(size, XGranuleSize), "Must be granule aligned");
// Don't try to split the last granule - VirtualFree will fail
split_into_placeholder_granules(area->end() - size, size - XGranuleSize);
}
static void grow_from_front_callback(const XMemory* area, size_t size) {
assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start() - size, area->size() + size);
}
static void grow_from_back_callback(const XMemory* area, size_t size) {
assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size() + size);
}
static void register_with(XMemoryManager* manager) {
// Each reserved virtual memory address area registered in _manager is
// exactly covered by a single placeholder. Callbacks are installed so
// that whenever a memory area changes, the corresponding placeholder
// is adjusted.
//
// The create and grow callbacks are called when virtual memory is
// returned to the memory manager. The new memory area is then covered
// by a new single placeholder.
//
// The destroy and shrink callbacks are called when virtual memory is
// allocated from the memory manager. The memory area is then split
// into granule-sized placeholders.
//
// See comment in xMapper_windows.cpp explaining why placeholders are
// split into XGranuleSize sized placeholders.
XMemoryManager::Callbacks callbacks;
callbacks._create = &create_callback;
callbacks._destroy = &destroy_callback;
callbacks._shrink_from_front = &shrink_from_front_callback;
callbacks._shrink_from_back = &shrink_from_back_callback;
callbacks._grow_from_front = &grow_from_front_callback;
callbacks._grow_from_back = &grow_from_back_callback;
manager->register_callbacks(callbacks);
}
};
virtual void initialize_after_reserve(XMemoryManager* manager) {
PlaceholderCallbacks::register_with(manager);
}
virtual bool reserve(uintptr_t addr, size_t size) {
const uintptr_t res = XMapper::reserve(addr, size);
assert(res == addr || res == 0, "Should not reserve other memory than requested");
return res == addr;
}
virtual void unreserve(uintptr_t addr, size_t size) {
XMapper::unreserve(addr, size);
}
};
// Implements Large Pages (locked) support using shared AWE physical memory.
// XPhysicalMemory layer needs access to the section
HANDLE XAWESection;
class XVirtualMemoryManagerLargePages : public XVirtualMemoryManagerImpl {
private:
virtual void initialize_before_reserve() {
XAWESection = XMapper::create_shared_awe_section();
}
virtual bool reserve(uintptr_t addr, size_t size) {
const uintptr_t res = XMapper::reserve_for_shared_awe(XAWESection, addr, size);
assert(res == addr || res == 0, "Should not reserve other memory than requested");
return res == addr;
}
virtual void unreserve(uintptr_t addr, size_t size) {
XMapper::unreserve_for_shared_awe(addr, size);
}
};
static XVirtualMemoryManagerImpl* _impl = nullptr;
void XVirtualMemoryManager::pd_initialize_before_reserve() {
if (XLargePages::is_enabled()) {
_impl = new XVirtualMemoryManagerLargePages();
} else {
_impl = new XVirtualMemoryManagerSmallPages();
}
_impl->initialize_before_reserve();
}
void XVirtualMemoryManager::pd_initialize_after_reserve() {
_impl->initialize_after_reserve(&_manager);
}
bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
return _impl->reserve(addr, size);
}
void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
_impl->unreserve(addr, size);
}

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP
#include <sys/syscall.h>
//
// Support for building on older Linux systems
//
#ifndef SYS_memfd_create
#define SYS_memfd_create 279
#endif
#ifndef SYS_fallocate
#define SYS_fallocate 47
#endif
#endif // OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP

View File

@ -1,42 +0,0 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP
#include <sys/syscall.h>
//
// Support for building on older Linux systems
//
#ifndef SYS_memfd_create
#define SYS_memfd_create 360
#endif
#ifndef SYS_fallocate
#define SYS_fallocate 309
#endif
#endif // OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP

View File

@ -1,42 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP
#define OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP
#include <sys/syscall.h>
//
// Support for building on older Linux systems
//
#ifndef SYS_memfd_create
#define SYS_memfd_create 279
#endif
#ifndef SYS_fallocate
#define SYS_fallocate 47
#endif
#endif // OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
#include <sys/syscall.h>
//
// Support for building on older Linux systems
//
#ifndef SYS_memfd_create
#define SYS_memfd_create 319
#endif
#ifndef SYS_fallocate
#define SYS_fallocate 285
#endif
#endif // OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
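// ---------------------------------------------------------------------------
// Illustrative sketch of why these fallback numbers exist: with them defined,
// memfd_create and fallocate can be invoked through syscall(2) even when the
// installed libc headers predate the wrappers. Assumptions: 64-bit Linux; the
// 4K preallocation length is arbitrary.
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U // from <linux/memfd.h>, for older headers
#endif

int main() {
  const int fd = (int)syscall(SYS_memfd_create, "demo", MFD_CLOEXEC);
  if (fd == -1) {
    perror("memfd_create");
    return 1;
  }
  // Preallocate backing storage through the raw fallocate syscall as well
  if (syscall(SYS_fallocate, fd, 0, (off_t)0, (off_t)(4 * 1024)) == -1) {
    perror("fallocate");
  }
  printf("memfd fd=%d\n", fd);
  close(fd);
  return 0;
}
// ---------------------------------------------------------------------------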

View File

@ -33,7 +33,6 @@
EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
G1GC_ONLY(f(G1BarrierSet)) \
SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet)) \
ZGC_ONLY(f(XBarrierSet)) \
ZGC_ONLY(f(ZBarrierSet))
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \

View File

@ -40,7 +40,6 @@
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/x/xBarrierSet.inline.hpp"
#include "gc/z/zBarrierSet.inline.hpp"
#endif

View File

@ -44,7 +44,7 @@
#include "gc/shenandoah/shenandoahArguments.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/shared/zSharedArguments.hpp"
#include "gc/z/zArguments.hpp"
#endif
struct IncludedGC {
@ -62,7 +62,7 @@ struct IncludedGC {
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
SERIALGC_ONLY(static SerialArguments serialArguments;)
SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;)
ZGC_ONLY(static ZSharedArguments zArguments;)
ZGC_ONLY(static ZArguments zArguments;)
// Table of included GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.

View File

@ -43,11 +43,7 @@ GCName GCConfiguration::young_collector() const {
}
if (UseZGC) {
if (ZGenerational) {
return ZMinor;
} else {
return NA;
}
return ZMinor;
}
if (UseShenandoahGC) {
@ -66,12 +62,8 @@ GCName GCConfiguration::old_collector() const {
return ParallelOld;
}
if (UseZGC) {
if (ZGenerational) {
return ZMajor;
} else {
return Z;
}
if (UseZGC) {
return ZMajor;
}
if (UseShenandoahGC) {

View File

@ -37,7 +37,6 @@ enum GCName {
G1Full,
ZMinor,
ZMajor,
Z, // Support for the legacy, single-gen mode
Shenandoah,
NA,
GCNameEndSentinel
@ -56,7 +55,6 @@ class GCNameHelper {
case G1Full: return "G1Full";
case ZMinor: return "ZGC Minor";
case ZMajor: return "ZGC Major";
case Z: return "Z";
case Shenandoah: return "Shenandoah";
case NA: return "N/A";
default: ShouldNotReachHere(); return nullptr;

View File

@ -43,7 +43,7 @@
#include "gc/shenandoah/shenandoah_globals.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/shared/z_shared_globals.hpp"
#include "gc/z/z_globals.hpp"
#endif
#define GC_FLAGS(develop, \
@ -93,7 +93,7 @@
range, \
constraint)) \
\
ZGC_ONLY(GC_Z_SHARED_FLAGS( \
ZGC_ONLY(GC_Z_FLAGS( \
develop, \
develop_pd, \
product, \
@ -118,9 +118,6 @@
product(bool, UseZGC, false, \
"Use the Z garbage collector") \
\
product(bool, ZGenerational, true, \
"Use the generational version of ZGC") \
\
product(bool, UseShenandoahGC, false, \
"Use the Shenandoah garbage collector") \
\

View File

@ -46,7 +46,7 @@
#include "gc/shenandoah/vmStructs_shenandoah.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/shared/vmStructs_z_shared.hpp"
#include "gc/z/vmStructs_z.hpp"
#endif
#define VM_STRUCTS_GC(nonstatic_field, \
@ -69,7 +69,7 @@
SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
ZGC_ONLY(VM_STRUCTS_Z_SHARED(nonstatic_field, \
ZGC_ONLY(VM_STRUCTS_Z(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
\
@ -121,7 +121,7 @@
SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
ZGC_ONLY(VM_TYPES_Z_SHARED(declare_type, \
ZGC_ONLY(VM_TYPES_Z(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
\
@ -175,7 +175,7 @@
declare_constant_with_value)) \
SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant, \
declare_constant_with_value)) \
ZGC_ONLY(VM_INT_CONSTANTS_Z_SHARED(declare_constant, \
ZGC_ONLY(VM_INT_CONSTANTS_Z(declare_constant, \
declare_constant_with_value)) \
\
/********************************************/ \
@ -199,6 +199,6 @@
declare_constant(CollectedHeap::G1) \
#define VM_LONG_CONSTANTS_GC(declare_constant) \
ZGC_ONLY(VM_LONG_CONSTANTS_Z_SHARED(declare_constant))
ZGC_ONLY(VM_LONG_CONSTANTS_Z(declare_constant))
#endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP

View File

@ -1,237 +0,0 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "gc/x/c1/xBarrierSetC1.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "utilities/macros.hpp"
XLoadBarrierStubC1::XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
_decorators(access.decorators()),
_ref_addr(access.resolved_addr()),
_ref(ref),
_tmp(LIR_OprFact::illegalOpr),
_runtime_stub(runtime_stub) {
assert(_ref_addr->is_address(), "Must be an address");
assert(_ref->is_register(), "Must be a register");
// Allocate tmp register if needed
if (_ref_addr->as_address_ptr()->index()->is_valid() ||
_ref_addr->as_address_ptr()->disp() != 0) {
// Has index or displacement, need tmp register to load address into
_tmp = access.gen()->new_pointer_register();
}
FrameMap* f = Compilation::current()->frame_map();
f->update_reserved_argument_area_size(2 * BytesPerWord);
}
DecoratorSet XLoadBarrierStubC1::decorators() const {
return _decorators;
}
LIR_Opr XLoadBarrierStubC1::ref() const {
return _ref;
}
LIR_Opr XLoadBarrierStubC1::ref_addr() const {
return _ref_addr;
}
LIR_Opr XLoadBarrierStubC1::tmp() const {
return _tmp;
}
address XLoadBarrierStubC1::runtime_stub() const {
return _runtime_stub;
}
void XLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case();
visitor->do_input(_ref_addr);
visitor->do_output(_ref);
if (_tmp->is_valid()) {
visitor->do_temp(_tmp);
}
}
void XLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
XBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
}
#ifndef PRODUCT
void XLoadBarrierStubC1::print_name(outputStream* out) const {
out->print("XLoadBarrierStubC1");
}
#endif // PRODUCT
class LIR_OpXLoadBarrierTest : public LIR_Op {
private:
LIR_Opr _opr;
public:
LIR_OpXLoadBarrierTest(LIR_Opr opr) :
LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, nullptr),
_opr(opr) {}
virtual void visit(LIR_OpVisitState* state) {
state->do_input(_opr);
}
virtual void emit_code(LIR_Assembler* ce) {
XBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
}
virtual void print_instr(outputStream* out) const {
_opr->print(out);
out->print(" ");
}
#ifndef PRODUCT
virtual const char* name() const {
return "lir_z_load_barrier_test";
}
#endif // PRODUCT
};
static bool barrier_needed(LIRAccess& access) {
return XBarrierSet::barrier_needed(access.decorators(), access.type());
}
XBarrierSetC1::XBarrierSetC1() :
_load_barrier_on_oop_field_preloaded_runtime_stub(nullptr),
_load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr) {}
address XBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
if ((decorators & ON_WEAK_OOP_REF) != 0) {
return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
} else {
return _load_barrier_on_oop_field_preloaded_runtime_stub;
}
}
#ifdef ASSERT
#define __ access.gen()->lir(__FILE__, __LINE__)->
#else
#define __ access.gen()->lir()->
#endif
void XBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
// Fast path
__ append(new LIR_OpXLoadBarrierTest(result));
// Slow path
const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
CodeStub* const stub = new XLoadBarrierStubC1(access, result, runtime_stub);
__ branch(lir_cond_notEqual, stub);
__ branch_destination(stub->continuation());
}
LIR_Opr XBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
// We must resolve in register when patching. This is to avoid
// having a patch area in the load barrier stub, since the call
// into the runtime to patch will not have the proper oop map.
const bool patch_before_barrier = barrier_needed(access) && (access.decorators() & C1_NEEDS_PATCHING) != 0;
return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}
#undef __
void XBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
BarrierSetC1::load_at_resolved(access, result);
if (barrier_needed(access)) {
load_barrier(access, result);
}
}
static void pre_load_barrier(LIRAccess& access) {
DecoratorSet decorators = access.decorators();
// Downgrade access to MO_UNORDERED
decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
// Remove ACCESS_WRITE
decorators = (decorators & ~ACCESS_WRITE);
// Generate synthetic load at
access.gen()->access_load_at(decorators,
access.type(),
access.base().item(),
access.offset().opr(),
access.gen()->new_register(access.type()),
nullptr /* patch_emit_info */,
nullptr /* load_emit_info */);
}
LIR_Opr XBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_xchg_at_resolved(access, value);
}
LIR_Opr XBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
if (barrier_needed(access)) {
pre_load_barrier(access);
}
return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}
class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
private:
const DecoratorSet _decorators;
public:
XLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
_decorators(decorators) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
XBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
return nullptr;
}
};
static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
XLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map */, &cl);
return code_blob->code_begin();
}
void XBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
_load_barrier_on_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
_load_barrier_on_weak_oop_field_preloaded_runtime_stub =
generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
}

View File

@ -1,78 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_C1_XBARRIERSETC1_HPP
#define SHARE_GC_X_C1_XBARRIERSETC1_HPP
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_LIR.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/accessDecorators.hpp"
class XLoadBarrierStubC1 : public CodeStub {
private:
DecoratorSet _decorators;
LIR_Opr _ref_addr;
LIR_Opr _ref;
LIR_Opr _tmp;
address _runtime_stub;
public:
XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
DecoratorSet decorators() const;
LIR_Opr ref() const;
LIR_Opr ref_addr() const;
LIR_Opr tmp() const;
address runtime_stub() const;
virtual void emit_code(LIR_Assembler* ce);
virtual void visit(LIR_OpVisitState* visitor);
#ifndef PRODUCT
virtual void print_name(outputStream* out) const;
#endif // PRODUCT
};
class XBarrierSetC1 : public BarrierSetC1 {
private:
address _load_barrier_on_oop_field_preloaded_runtime_stub;
address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
void load_barrier(LIRAccess& access, LIR_Opr result) const;
protected:
virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
public:
XBarrierSetC1();
virtual void generate_c1_runtime_stubs(BufferBlob* blob);
};
#endif // SHARE_GC_X_C1_XBARRIERSETC1_HPP

View File

@ -1,583 +0,0 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/x/c2/xBarrierSetC2.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetRuntime.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
class XBarrierSetC2State : public ArenaObj {
private:
GrowableArray<XLoadBarrierStubC2*>* _stubs;
Node_Array _live;
public:
XBarrierSetC2State(Arena* arena) :
_stubs(new (arena) GrowableArray<XLoadBarrierStubC2*>(arena, 8, 0, nullptr)),
_live(arena) {}
GrowableArray<XLoadBarrierStubC2*>* stubs() {
return _stubs;
}
RegMask* live(const Node* node) {
if (!node->is_Mach()) {
// Don't need liveness for non-MachNodes
return nullptr;
}
const MachNode* const mach = node->as_Mach();
if (mach->barrier_data() == XLoadBarrierElided) {
// Don't need liveness data for nodes without barriers
return nullptr;
}
RegMask* live = (RegMask*)_live[node->_idx];
if (live == nullptr) {
live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
_live.map(node->_idx, (Node*)live);
}
return live;
}
};
static XBarrierSetC2State* barrier_set_state() {
return reinterpret_cast<XBarrierSetC2State*>(Compile::current()->barrier_set_state());
}
XLoadBarrierStubC2* XLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
XLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) XLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data);
if (!Compile::current()->output()->in_scratch_emit_size()) {
barrier_set_state()->stubs()->append(stub);
}
return stub;
}
XLoadBarrierStubC2::XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) :
_node(node),
_ref_addr(ref_addr),
_ref(ref),
_tmp(tmp),
_barrier_data(barrier_data),
_entry(),
_continuation() {
assert_different_registers(ref, ref_addr.base());
assert_different_registers(ref, ref_addr.index());
}
Address XLoadBarrierStubC2::ref_addr() const {
return _ref_addr;
}
Register XLoadBarrierStubC2::ref() const {
return _ref;
}
Register XLoadBarrierStubC2::tmp() const {
return _tmp;
}
address XLoadBarrierStubC2::slow_path() const {
DecoratorSet decorators = DECORATORS_NONE;
if (_barrier_data & XLoadBarrierStrong) {
decorators |= ON_STRONG_OOP_REF;
}
if (_barrier_data & XLoadBarrierWeak) {
decorators |= ON_WEAK_OOP_REF;
}
if (_barrier_data & XLoadBarrierPhantom) {
decorators |= ON_PHANTOM_OOP_REF;
}
if (_barrier_data & XLoadBarrierNoKeepalive) {
decorators |= AS_NO_KEEPALIVE;
}
return XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators);
}
RegMask& XLoadBarrierStubC2::live() const {
RegMask* mask = barrier_set_state()->live(_node);
assert(mask != nullptr, "must be mach-node with barrier");
return *mask;
}
Label* XLoadBarrierStubC2::entry() {
// The _entry will never be bound when in_scratch_emit_size() is true.
// However, we still need to return a label that is not bound now, but
// will eventually be bound. Any label will do, as it will only act as
// a placeholder, so we return the _continuation label.
return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
}
Label* XLoadBarrierStubC2::continuation() {
return &_continuation;
}
void* XBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
return new (comp_arena) XBarrierSetC2State(comp_arena);
}
void XBarrierSetC2::late_barrier_analysis() const {
analyze_dominating_barriers();
compute_liveness_at_stubs();
}
void XBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
MacroAssembler masm(&cb);
GrowableArray<XLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
for (int i = 0; i < stubs->length(); i++) {
// Make sure there is enough space in the code buffer
if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
}
masm.flush();
}
int XBarrierSetC2::estimate_stub_size() const {
Compile* const C = Compile::current();
BufferBlob* const blob = C->output()->scratch_buffer_blob();
GrowableArray<XLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
int size = 0;
for (int i = 0; i < stubs->length(); i++) {
CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin());
MacroAssembler masm(&cb);
XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
size += cb.insts_size();
}
return size;
}
static void set_barrier_data(C2Access& access) {
if (XBarrierSet::barrier_needed(access.decorators(), access.type())) {
uint8_t barrier_data = 0;
if (access.decorators() & ON_PHANTOM_OOP_REF) {
barrier_data |= XLoadBarrierPhantom;
} else if (access.decorators() & ON_WEAK_OOP_REF) {
barrier_data |= XLoadBarrierWeak;
} else {
barrier_data |= XLoadBarrierStrong;
}
if (access.decorators() & AS_NO_KEEPALIVE) {
barrier_data |= XLoadBarrierNoKeepalive;
}
access.set_barrier_data(barrier_data);
}
}
Node* XBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
set_barrier_data(access);
return BarrierSetC2::load_at_resolved(access, val_type);
}
Node* XBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* val_type) const {
set_barrier_data(access);
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
}
Node* XBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
set_barrier_data(access);
return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
Node* XBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
set_barrier_data(access);
return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
}
bool XBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
bool is_clone, bool is_clone_instance,
ArrayCopyPhase phase) const {
if (phase == ArrayCopyPhase::Parsing) {
return false;
}
if (phase == ArrayCopyPhase::Optimization) {
return is_clone_instance;
}
// else ArrayCopyPhase::Expansion
return type == T_OBJECT || type == T_ARRAY;
}
// This TypeFunc assumes a 64bit system
static const TypeFunc* clone_type() {
// Create input type (domain)
const Type** domain_fields = TypeTuple::fields(4);
domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL; // src
domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL; // dst
domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG; // size lower
domain_fields[TypeFunc::Parms + 3] = Type::HALF; // size upper
const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields);
// Create result type (range)
const Type** range_fields = TypeTuple::fields(0);
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields);
return TypeFunc::make(domain, range);
}
#define XTOP LP64_ONLY(COMMA phase->top())
void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
Node* const src = ac->in(ArrayCopyNode::Src);
const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr();
if (ac->is_clone_array() && ary_ptr != nullptr) {
BasicType bt = ary_ptr->elem()->array_element_basic_type();
if (is_reference_type(bt)) {
// Clone object array
bt = T_OBJECT;
} else {
// Clone primitive array
bt = T_LONG;
}
Node* ctrl = ac->in(TypeFunc::Control);
Node* mem = ac->in(TypeFunc::Memory);
Node* src = ac->in(ArrayCopyNode::Src);
Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
Node* dest = ac->in(ArrayCopyNode::Dest);
Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
Node* length = ac->in(ArrayCopyNode::Length);
if (bt == T_OBJECT) {
// BarrierSetC2::clone sets the offsets via BarrierSetC2::arraycopy_payload_base_offset
// which 8-byte aligns them to allow for word size copies. Make sure the offsets point
// to the first element in the array when cloning object arrays. Otherwise, load
// barriers are applied to parts of the header. Also adjust the length accordingly.
assert(src_offset == dest_offset, "should be equal");
jlong offset = src_offset->get_long();
if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
assert(!UseCompressedClassPointers, "should only happen without compressed class pointers");
assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset");
length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs
src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT));
dest_offset = src_offset;
}
}
Node* payload_src = phase->basic_plus_adr(src, src_offset);
Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
const char* copyfunc_name = "arraycopy";
address copyfunc_addr = phase->basictype2arraycopy(bt, nullptr, nullptr, true, copyfunc_name, true);
const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP);
phase->transform_later(call);
phase->igvn().replace_node(ac, call);
return;
}
// Clone instance
Node* const ctrl = ac->in(TypeFunc::Control);
Node* const mem = ac->in(TypeFunc::Memory);
Node* const dst = ac->in(ArrayCopyNode::Dest);
Node* const size = ac->in(ArrayCopyNode::Length);
assert(size->bottom_type()->is_long(), "Should be long");
// The native clone we are calling here expects the instance size in words
// Add header/offset size to payload size to get instance size.
Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
Node* const full_size = phase->transform_later(new AddLNode(size, base_offset));
Node* const call = phase->make_leaf_call(ctrl,
mem,
clone_type(),
XBarrierSetRuntime::clone_addr(),
"XBarrierSetRuntime::clone",
TypeRawPtr::BOTTOM,
src,
dst,
full_size,
phase->top());
phase->transform_later(call);
phase->igvn().replace_node(ac, call);
}
#undef XTOP
// == Dominating barrier elision ==
static bool block_has_safepoint(const Block* block, uint from, uint to) {
for (uint i = from; i < to; i++) {
if (block->get_node(i)->is_MachSafePoint()) {
// Safepoint found
return true;
}
}
// Safepoint not found
return false;
}
static bool block_has_safepoint(const Block* block) {
return block_has_safepoint(block, 0, block->number_of_nodes());
}
static uint block_index(const Block* block, const Node* node) {
for (uint j = 0; j < block->number_of_nodes(); ++j) {
if (block->get_node(j) == node) {
return j;
}
}
ShouldNotReachHere();
return 0;
}
void XBarrierSetC2::analyze_dominating_barriers() const {
ResourceMark rm;
Compile* const C = Compile::current();
PhaseCFG* const cfg = C->cfg();
Block_List worklist;
Node_List mem_ops;
Node_List barrier_loads;
// Step 1 - Find accesses, and track them in lists
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
const Block* const block = cfg->get_block(i);
for (uint j = 0; j < block->number_of_nodes(); ++j) {
const Node* const node = block->get_node(j);
if (!node->is_Mach()) {
continue;
}
MachNode* const mach = node->as_Mach();
switch (mach->ideal_Opcode()) {
case Op_LoadP:
if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
barrier_loads.push(mach);
}
if ((mach->barrier_data() & (XLoadBarrierStrong | XLoadBarrierNoKeepalive)) ==
XLoadBarrierStrong) {
mem_ops.push(mach);
}
break;
case Op_CompareAndExchangeP:
case Op_CompareAndSwapP:
case Op_GetAndSetP:
if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
barrier_loads.push(mach);
}
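// Fall through: the atomic access is also recorded as a memory operation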
case Op_StoreP:
mem_ops.push(mach);
break;
default:
break;
}
}
}
// Step 2 - Find dominating accesses for each load
for (uint i = 0; i < barrier_loads.size(); i++) {
MachNode* const load = barrier_loads.at(i)->as_Mach();
const TypePtr* load_adr_type = nullptr;
intptr_t load_offset = 0;
const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
Block* const load_block = cfg->get_block_for_node(load);
const uint load_index = block_index(load_block, load);
for (uint j = 0; j < mem_ops.size(); j++) {
MachNode* mem = mem_ops.at(j)->as_Mach();
const TypePtr* mem_adr_type = nullptr;
intptr_t mem_offset = 0;
const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
Block* mem_block = cfg->get_block_for_node(mem);
uint mem_index = block_index(mem_block, mem);
if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
load_obj == nullptr || mem_obj == nullptr ||
load_offset < 0 || mem_offset < 0) {
continue;
}
if (mem_obj != load_obj || mem_offset != load_offset) {
// Not the same addresses, not a candidate
continue;
}
if (load_block == mem_block) {
// Earlier accesses in the same block
if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) {
load->set_barrier_data(XLoadBarrierElided);
}
} else if (mem_block->dominates(load_block)) {
// Dominating block? Look around for safepoints
ResourceMark rm;
Block_List stack;
VectorSet visited;
stack.push(load_block);
bool safepoint_found = block_has_safepoint(load_block);
while (!safepoint_found && stack.size() > 0) {
Block* block = stack.pop();
if (visited.test_set(block->_pre_order)) {
continue;
}
if (block_has_safepoint(block)) {
safepoint_found = true;
break;
}
if (block == mem_block) {
continue;
}
// Push predecessor blocks
for (uint p = 1; p < block->num_preds(); ++p) {
Block* pred = cfg->get_block_for_node(block->pred(p));
stack.push(pred);
}
}
if (!safepoint_found) {
load->set_barrier_data(XLoadBarrierElided);
}
}
}
}
}
// == Reduced spilling optimization ==
void XBarrierSetC2::compute_liveness_at_stubs() const {
ResourceMark rm;
Compile* const C = Compile::current();
Arena* const A = Thread::current()->resource_area();
PhaseCFG* const cfg = C->cfg();
PhaseRegAlloc* const regalloc = C->regalloc();
RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
XBarrierSetAssembler* const bs = XBarrierSet::assembler();
Block_List worklist;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
new ((void*)(live + i)) RegMask();
worklist.push(cfg->get_block(i));
}
while (worklist.size() > 0) {
const Block* const block = worklist.pop();
RegMask& old_live = live[block->_pre_order];
RegMask new_live;
// Initialize to union of successors
for (uint i = 0; i < block->_num_succs; i++) {
const uint succ_id = block->_succs[i]->_pre_order;
new_live.OR(live[succ_id]);
}
// Walk block backwards, computing liveness
for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
const Node* const node = block->get_node(i);
// Remove def bits
const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
if (first != OptoReg::Bad) {
new_live.Remove(first);
}
if (second != OptoReg::Bad) {
new_live.Remove(second);
}
// Add use bits
for (uint j = 1; j < node->req(); ++j) {
const Node* const use = node->in(j);
const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
if (first != OptoReg::Bad) {
new_live.Insert(first);
}
if (second != OptoReg::Bad) {
new_live.Insert(second);
}
}
// If this node tracks liveness, update it
RegMask* const regs = barrier_set_state()->live(node);
if (regs != nullptr) {
regs->OR(new_live);
}
}
// Now at block top, see if we have any changes
new_live.SUBTRACT(old_live);
if (new_live.is_NotEmpty()) {
// Liveness has been refined, update and propagate to prior blocks
old_live.OR(new_live);
for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
}
}
}
}
#ifndef PRODUCT
void XBarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const {
if ((mach->barrier_data() & XLoadBarrierStrong) != 0) {
st->print("strong ");
}
if ((mach->barrier_data() & XLoadBarrierWeak) != 0) {
st->print("weak ");
}
if ((mach->barrier_data() & XLoadBarrierPhantom) != 0) {
st->print("phantom ");
}
if ((mach->barrier_data() & XLoadBarrierNoKeepalive) != 0) {
st->print("nokeepalive ");
}
}
#endif // !PRODUCT
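
The reduced-spilling pass deleted above is a standard backward liveness fixpoint over the CFG. As a minimal standalone sketch (not part of the removed file; ToyBlock, integer register ids and the brute-force predecessor scan are simplifications standing in for HotSpot's Block, RegMask and PhaseCFG), the same worklist scheme looks like this:

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal stand-in for a CFG block: register defs/uses and successor ids.
struct ToyBlock {
  std::vector<int> succs;
  std::vector<int> defs;   // registers written by the block
  std::vector<int> uses;   // registers read by the block
};

// Backward liveness fixpoint, mirroring the worklist loop in
// compute_liveness_at_stubs(): live-out is the union of the successors'
// live-in; defs kill, uses gen; re-push predecessors when live-in grows.
static std::vector<uint64_t> compute_liveness(const std::vector<ToyBlock>& cfg) {
  std::vector<uint64_t> live_in(cfg.size(), 0);
  std::vector<int> worklist;
  for (int i = 0; i < (int)cfg.size(); i++) {
    worklist.push_back(i);
  }
  while (!worklist.empty()) {
    const int b = worklist.back();
    worklist.pop_back();
    uint64_t live = 0;
    for (int s : cfg[b].succs) {
      live |= live_in[s];                  // initialize to union of successors
    }
    for (int d : cfg[b].defs) {
      live &= ~(uint64_t(1) << d);         // remove def bits
    }
    for (int u : cfg[b].uses) {
      live |= (uint64_t(1) << u);          // add use bits
    }
    const uint64_t grown = live & ~live_in[b];
    if (grown != 0) {
      live_in[b] |= grown;                 // liveness refined, propagate
      // A real implementation would keep predecessor lists; for brevity we
      // scan all blocks to find the ones that can branch to b.
      for (int i = 0; i < (int)cfg.size(); i++) {
        for (int s : cfg[i].succs) {
          if (s == b) worklist.push_back(i);
        }
      }
    }
  }
  return live_in;
}

int main() {
  // 0 -> 1 -> 2, with register 3 defined in block 0 and used in block 2.
  std::vector<ToyBlock> cfg = {
    {{1}, {3}, {}},
    {{2}, {},  {}},
    {{},  {},  {3}},
  };
  const std::vector<uint64_t> live = compute_liveness(cfg);
  std::printf("live-in of block 1: 0x%llx\n", (unsigned long long)live[1]);
  return 0;
}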


@ -1,100 +0,0 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_C2_XBARRIERSETC2_HPP
#define SHARE_GC_X_C2_XBARRIERSETC2_HPP
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.hpp"
#include "opto/node.hpp"
#include "utilities/growableArray.hpp"
const uint8_t XLoadBarrierElided = 0;
const uint8_t XLoadBarrierStrong = 1;
const uint8_t XLoadBarrierWeak = 2;
const uint8_t XLoadBarrierPhantom = 4;
const uint8_t XLoadBarrierNoKeepalive = 8;
class XLoadBarrierStubC2 : public ArenaObj {
private:
const MachNode* _node;
const Address _ref_addr;
const Register _ref;
const Register _tmp;
const uint8_t _barrier_data;
Label _entry;
Label _continuation;
XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data);
public:
static XLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data);
Address ref_addr() const;
Register ref() const;
Register tmp() const;
address slow_path() const;
RegMask& live() const;
Label* entry();
Label* continuation();
};
class XBarrierSetC2 : public BarrierSetC2 {
private:
void compute_liveness_at_stubs() const;
void analyze_dominating_barriers() const;
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access,
Node* expected_val,
Node* new_val,
const Type* val_type) const;
virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access,
Node* expected_val,
Node* new_val,
const Type* value_type) const;
virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access,
Node* new_val,
const Type* val_type) const;
public:
virtual void* create_barrier_state(Arena* comp_arena) const;
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc,
BasicType type,
bool is_clone,
bool is_clone_instance,
ArrayCopyPhase phase) const;
virtual void clone_at_expansion(PhaseMacroExpand* phase,
ArrayCopyNode* ac) const;
virtual void late_barrier_analysis() const;
virtual int estimate_stub_size() const;
virtual void emit_stubs(CodeBuffer& cb) const;
#ifndef PRODUCT
virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const;
#endif
};
#endif // SHARE_GC_X_C2_XBARRIERSETC2_HPP
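
The XLoadBarrier* values removed above are independent bits packed into an 8-bit barrier_data field. A small standalone sketch of how such a value is composed and decoded (constants re-declared locally for the example; the weak/no-keepalive combination is just an illustrative choice):

#include <cstdint>
#include <cstdio>

// Re-declared locally for the example; values match the header above.
const uint8_t LoadBarrierElided      = 0;
const uint8_t LoadBarrierStrong      = 1;
const uint8_t LoadBarrierWeak        = 2;
const uint8_t LoadBarrierPhantom     = 4;
const uint8_t LoadBarrierNoKeepalive = 8;

int main() {
  // A weak, no-keepalive oop load combines two bits.
  const uint8_t barrier_data = LoadBarrierWeak | LoadBarrierNoKeepalive;

  if (barrier_data == LoadBarrierElided) {
    std::printf("no barrier needed\n");
  }
  // Decoding mirrors dump_barrier_data(): test each bit independently.
  if (barrier_data & LoadBarrierStrong)      std::printf("strong ");
  if (barrier_data & LoadBarrierWeak)        std::printf("weak ");
  if (barrier_data & LoadBarrierPhantom)     std::printf("phantom ");
  if (barrier_data & LoadBarrierNoKeepalive) std::printf("nokeepalive ");
  std::printf("\n");
  return 0;
}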


@ -1,41 +0,0 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/vmStructs_x.hpp"
XGlobalsForVMStructs::XGlobalsForVMStructs() :
_XGlobalPhase(&XGlobalPhase),
_XGlobalSeqNum(&XGlobalSeqNum),
_XAddressOffsetMask(&XAddressOffsetMask),
_XAddressMetadataMask(&XAddressMetadataMask),
_XAddressMetadataFinalizable(&XAddressMetadataFinalizable),
_XAddressGoodMask(&XAddressGoodMask),
_XAddressBadMask(&XAddressBadMask),
_XAddressWeakBadMask(&XAddressWeakBadMask),
_XObjectAlignmentSmallShift(&XObjectAlignmentSmallShift),
_XObjectAlignmentSmall(&XObjectAlignmentSmall) {
}
XGlobalsForVMStructs XGlobalsForVMStructs::_instance;
XGlobalsForVMStructs* XGlobalsForVMStructs::_instance_p = &XGlobalsForVMStructs::_instance;


@ -1,143 +0,0 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_VMSTRUCTS_X_HPP
#define SHARE_GC_X_VMSTRUCTS_X_HPP
#include "gc/x/xAttachedArray.hpp"
#include "gc/x/xCollectedHeap.hpp"
#include "gc/x/xForwarding.hpp"
#include "gc/x/xGranuleMap.hpp"
#include "gc/x/xHeap.hpp"
#include "gc/x/xPageAllocator.hpp"
#include "utilities/macros.hpp"
// Expose some ZGC globals to the SA agent.
class XGlobalsForVMStructs {
static XGlobalsForVMStructs _instance;
public:
static XGlobalsForVMStructs* _instance_p;
XGlobalsForVMStructs();
uint32_t* _XGlobalPhase;
uint32_t* _XGlobalSeqNum;
uintptr_t* _XAddressOffsetMask;
uintptr_t* _XAddressMetadataMask;
uintptr_t* _XAddressMetadataFinalizable;
uintptr_t* _XAddressGoodMask;
uintptr_t* _XAddressBadMask;
uintptr_t* _XAddressWeakBadMask;
const int* _XObjectAlignmentSmallShift;
const int* _XObjectAlignmentSmall;
};
typedef XGranuleMap<XPage*> XGranuleMapForPageTable;
typedef XGranuleMap<XForwarding*> XGranuleMapForForwarding;
typedef XAttachedArray<XForwarding, XForwardingEntry> XAttachedArrayForForwarding;
#define VM_STRUCTS_X(nonstatic_field, volatile_nonstatic_field, static_field) \
static_field(XGlobalsForVMStructs, _instance_p, XGlobalsForVMStructs*) \
nonstatic_field(XGlobalsForVMStructs, _XGlobalPhase, uint32_t*) \
nonstatic_field(XGlobalsForVMStructs, _XGlobalSeqNum, uint32_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressOffsetMask, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataMask, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataFinalizable, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressGoodMask, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressBadMask, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XAddressWeakBadMask, uintptr_t*) \
nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmallShift, const int*) \
nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmall, const int*) \
\
nonstatic_field(XCollectedHeap, _heap, XHeap) \
\
nonstatic_field(XHeap, _page_allocator, XPageAllocator) \
nonstatic_field(XHeap, _page_table, XPageTable) \
nonstatic_field(XHeap, _forwarding_table, XForwardingTable) \
nonstatic_field(XHeap, _relocate, XRelocate) \
\
nonstatic_field(XPage, _type, const uint8_t) \
nonstatic_field(XPage, _seqnum, uint32_t) \
nonstatic_field(XPage, _virtual, const XVirtualMemory) \
volatile_nonstatic_field(XPage, _top, uintptr_t) \
\
nonstatic_field(XPageAllocator, _max_capacity, const size_t) \
volatile_nonstatic_field(XPageAllocator, _capacity, size_t) \
volatile_nonstatic_field(XPageAllocator, _used, size_t) \
\
nonstatic_field(XPageTable, _map, XGranuleMapForPageTable) \
\
nonstatic_field(XGranuleMapForPageTable, _map, XPage** const) \
nonstatic_field(XGranuleMapForForwarding, _map, XForwarding** const) \
\
nonstatic_field(XForwardingTable, _map, XGranuleMapForForwarding) \
\
nonstatic_field(XVirtualMemory, _start, const uintptr_t) \
nonstatic_field(XVirtualMemory, _end, const uintptr_t) \
\
nonstatic_field(XForwarding, _virtual, const XVirtualMemory) \
nonstatic_field(XForwarding, _object_alignment_shift, const size_t) \
volatile_nonstatic_field(XForwarding, _ref_count, int) \
nonstatic_field(XForwarding, _entries, const XAttachedArrayForForwarding) \
nonstatic_field(XForwardingEntry, _entry, uint64_t) \
nonstatic_field(XAttachedArrayForForwarding, _length, const size_t)
#define VM_INT_CONSTANTS_X(declare_constant, declare_constant_with_value) \
declare_constant(XPhaseRelocate) \
declare_constant(XPageTypeSmall) \
declare_constant(XPageTypeMedium) \
declare_constant(XPageTypeLarge) \
declare_constant(XObjectAlignmentMediumShift) \
declare_constant(XObjectAlignmentLargeShift)
#define VM_LONG_CONSTANTS_X(declare_constant) \
declare_constant(XGranuleSizeShift) \
declare_constant(XPageSizeSmallShift) \
declare_constant(XPageSizeMediumShift) \
declare_constant(XAddressOffsetShift) \
declare_constant(XAddressOffsetBits) \
declare_constant(XAddressOffsetMask) \
declare_constant(XAddressOffsetMax)
#define VM_TYPES_X(declare_type, declare_toplevel_type, declare_integer_type) \
declare_toplevel_type(XGlobalsForVMStructs) \
declare_type(XCollectedHeap, CollectedHeap) \
declare_toplevel_type(XHeap) \
declare_toplevel_type(XRelocate) \
declare_toplevel_type(XPage) \
declare_toplevel_type(XPageAllocator) \
declare_toplevel_type(XPageTable) \
declare_toplevel_type(XAttachedArrayForForwarding) \
declare_toplevel_type(XGranuleMapForPageTable) \
declare_toplevel_type(XGranuleMapForForwarding) \
declare_toplevel_type(XVirtualMemory) \
declare_toplevel_type(XForwardingTable) \
declare_toplevel_type(XForwarding) \
declare_toplevel_type(XForwardingEntry) \
declare_toplevel_type(XPhysicalMemoryManager)
#endif // SHARE_GC_X_VMSTRUCTS_X_HPP


@ -1,32 +0,0 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xAbort.hpp"
#include "runtime/atomic.hpp"
volatile bool XAbort::_should_abort = false;
void XAbort::abort() {
Atomic::release_store_fence(&_should_abort, true);
}


@ -1,38 +0,0 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XABORT_HPP
#define SHARE_GC_X_XABORT_HPP
#include "memory/allStatic.hpp"
class XAbort : public AllStatic {
private:
static volatile bool _should_abort;
public:
static bool should_abort();
static void abort();
};
#endif // SHARE_GC_X_XABORT_HPP


@ -1,35 +0,0 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XABORT_INLINE_HPP
#define SHARE_GC_X_XABORT_INLINE_HPP
#include "gc/x/xAbort.hpp"
#include "runtime/atomic.hpp"
inline bool XAbort::should_abort() {
return Atomic::load_acquire(&_should_abort);
}
#endif // SHARE_GC_X_XABORT_INLINE_HPP
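
XAbort was a one-way release/acquire flag. A standalone sketch of the same pattern with std::atomic rather than HotSpot's Atomic API (and a plain release store instead of release_store_fence, which additionally issues a full fence):

#include <atomic>
#include <cstdio>
#include <thread>

// One-way abort flag: set with release semantics, read with acquire, so
// everything written before the abort is visible to a thread that
// observes the flag as true.
static std::atomic<bool> should_abort{false};

static void worker() {
  while (!should_abort.load(std::memory_order_acquire)) {
    // ... do a bounded chunk of work ...
  }
  std::printf("worker observed abort\n");
}

int main() {
  std::thread t(worker);
  should_abort.store(true, std::memory_order_release);
  t.join();
  return 0;
}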


@ -1,58 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xAddress.hpp"
#include "gc/x/xGlobals.hpp"
void XAddress::set_good_mask(uintptr_t mask) {
XAddressGoodMask = mask;
XAddressBadMask = XAddressGoodMask ^ XAddressMetadataMask;
XAddressWeakBadMask = (XAddressGoodMask | XAddressMetadataRemapped | XAddressMetadataFinalizable) ^ XAddressMetadataMask;
}
void XAddress::initialize() {
XAddressOffsetBits = XPlatformAddressOffsetBits();
XAddressOffsetMask = (((uintptr_t)1 << XAddressOffsetBits) - 1) << XAddressOffsetShift;
XAddressOffsetMax = (uintptr_t)1 << XAddressOffsetBits;
XAddressMetadataShift = XPlatformAddressMetadataShift();
XAddressMetadataMask = (((uintptr_t)1 << XAddressMetadataBits) - 1) << XAddressMetadataShift;
XAddressMetadataMarked0 = (uintptr_t)1 << (XAddressMetadataShift + 0);
XAddressMetadataMarked1 = (uintptr_t)1 << (XAddressMetadataShift + 1);
XAddressMetadataRemapped = (uintptr_t)1 << (XAddressMetadataShift + 2);
XAddressMetadataFinalizable = (uintptr_t)1 << (XAddressMetadataShift + 3);
XAddressMetadataMarked = XAddressMetadataMarked0;
set_good_mask(XAddressMetadataRemapped);
}
void XAddress::flip_to_marked() {
XAddressMetadataMarked ^= (XAddressMetadataMarked0 | XAddressMetadataMarked1);
set_good_mask(XAddressMetadataMarked);
}
void XAddress::flip_to_remapped() {
set_good_mask(XAddressMetadataRemapped);
}
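
The mask arithmetic in set_good_mask() is easier to follow with concrete numbers. A standalone sketch with an illustrative bit layout (the shift and bit assignments below are chosen for the example, not taken from XPlatformAddressMetadataShift):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 4-bit metadata field at bit 44 (real shifts are platform
  // dependent): marked0, marked1, remapped, finalizable.
  const int       shift         = 44;
  const uintptr_t marked0       = uintptr_t(1) << (shift + 0);
  const uintptr_t marked1       = uintptr_t(1) << (shift + 1);
  const uintptr_t remapped      = uintptr_t(1) << (shift + 2);
  const uintptr_t finalizable   = uintptr_t(1) << (shift + 3);
  const uintptr_t metadata_mask = marked0 | marked1 | remapped | finalizable;

  // With remapped as the good color, every other metadata bit is bad.
  const uintptr_t good_mask = remapped;
  const uintptr_t bad_mask  = good_mask ^ metadata_mask;
  const uintptr_t weak_bad  = (good_mask | remapped | finalizable) ^ metadata_mask;

  std::printf("good: %016llx\n", (unsigned long long)good_mask);
  std::printf("bad:  %016llx\n", (unsigned long long)bad_mask);
  std::printf("weak: %016llx\n", (unsigned long long)weak_bad);

  // An oop colored with the good bit passes the (addr & bad_mask) test ...
  const uintptr_t good_oop = (uintptr_t(0x2a) << 3) | good_mask;
  std::printf("good oop is bad?  %d\n", (int)((good_oop & bad_mask) != 0));
  // ... while a stale color (e.g. marked0 from a previous cycle) fails it.
  const uintptr_t stale_oop = (uintptr_t(0x2a) << 3) | marked0;
  std::printf("stale oop is bad? %d\n", (int)((stale_oop & bad_mask) != 0));
  return 0;
}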


@ -1,67 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XADDRESS_HPP
#define SHARE_GC_X_XADDRESS_HPP
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
class XAddress : public AllStatic {
friend class XAddressTest;
private:
static void set_good_mask(uintptr_t mask);
public:
static void initialize();
static void flip_to_marked();
static void flip_to_remapped();
static bool is_null(uintptr_t value);
static bool is_bad(uintptr_t value);
static bool is_good(uintptr_t value);
static bool is_good_or_null(uintptr_t value);
static bool is_weak_bad(uintptr_t value);
static bool is_weak_good(uintptr_t value);
static bool is_weak_good_or_null(uintptr_t value);
static bool is_marked(uintptr_t value);
static bool is_marked_or_null(uintptr_t value);
static bool is_finalizable(uintptr_t value);
static bool is_finalizable_good(uintptr_t value);
static bool is_remapped(uintptr_t value);
static bool is_in(uintptr_t value);
static uintptr_t offset(uintptr_t value);
static uintptr_t good(uintptr_t value);
static uintptr_t good_or_null(uintptr_t value);
static uintptr_t finalizable_good(uintptr_t value);
static uintptr_t marked(uintptr_t value);
static uintptr_t marked0(uintptr_t value);
static uintptr_t marked1(uintptr_t value);
static uintptr_t remapped(uintptr_t value);
static uintptr_t remapped_or_null(uintptr_t value);
};
#endif // SHARE_GC_X_XADDRESS_HPP


@ -1,137 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XADDRESS_INLINE_HPP
#define SHARE_GC_X_XADDRESS_INLINE_HPP
#include "gc/x/xAddress.hpp"
#include "gc/x/xGlobals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
inline bool XAddress::is_null(uintptr_t value) {
return value == 0;
}
inline bool XAddress::is_bad(uintptr_t value) {
return value & XAddressBadMask;
}
inline bool XAddress::is_good(uintptr_t value) {
return !is_bad(value) && !is_null(value);
}
inline bool XAddress::is_good_or_null(uintptr_t value) {
// Checking if an address is "not bad" is an optimized version of
// checking if it's "good or null", which eliminates an explicit
// null check. However, the implicit null check only checks that
// the mask bits are zero, not that the entire address is zero.
// This means that an address without mask bits would pass through
// the barrier as if it was null. This should be harmless as such
// addresses should never be passed through the barrier.
const bool result = !is_bad(value);
assert((is_good(value) || is_null(value)) == result, "Bad address");
return result;
}
inline bool XAddress::is_weak_bad(uintptr_t value) {
return value & XAddressWeakBadMask;
}
inline bool XAddress::is_weak_good(uintptr_t value) {
return !is_weak_bad(value) && !is_null(value);
}
inline bool XAddress::is_weak_good_or_null(uintptr_t value) {
return !is_weak_bad(value);
}
inline bool XAddress::is_marked(uintptr_t value) {
return value & XAddressMetadataMarked;
}
inline bool XAddress::is_marked_or_null(uintptr_t value) {
return is_marked(value) || is_null(value);
}
inline bool XAddress::is_finalizable(uintptr_t value) {
return value & XAddressMetadataFinalizable;
}
inline bool XAddress::is_finalizable_good(uintptr_t value) {
return is_finalizable(value) && is_good(value ^ XAddressMetadataFinalizable);
}
inline bool XAddress::is_remapped(uintptr_t value) {
return value & XAddressMetadataRemapped;
}
inline bool XAddress::is_in(uintptr_t value) {
// Check that exactly one non-offset bit is set
if (!is_power_of_2(value & ~XAddressOffsetMask)) {
return false;
}
// Check that one of the non-finalizable metadata bits is set
return value & (XAddressMetadataMask & ~XAddressMetadataFinalizable);
}
inline uintptr_t XAddress::offset(uintptr_t value) {
return value & XAddressOffsetMask;
}
inline uintptr_t XAddress::good(uintptr_t value) {
return offset(value) | XAddressGoodMask;
}
inline uintptr_t XAddress::good_or_null(uintptr_t value) {
return is_null(value) ? 0 : good(value);
}
inline uintptr_t XAddress::finalizable_good(uintptr_t value) {
return offset(value) | XAddressMetadataFinalizable | XAddressGoodMask;
}
inline uintptr_t XAddress::marked(uintptr_t value) {
return offset(value) | XAddressMetadataMarked;
}
inline uintptr_t XAddress::marked0(uintptr_t value) {
return offset(value) | XAddressMetadataMarked0;
}
inline uintptr_t XAddress::marked1(uintptr_t value) {
return offset(value) | XAddressMetadataMarked1;
}
inline uintptr_t XAddress::remapped(uintptr_t value) {
return offset(value) | XAddressMetadataRemapped;
}
inline uintptr_t XAddress::remapped_or_null(uintptr_t value) {
return is_null(value) ? 0 : remapped(value);
}
#endif // SHARE_GC_X_XADDRESS_INLINE_HPP


@ -1,53 +0,0 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/x/xAddressSpaceLimit.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
static size_t address_space_limit() {
size_t limit = 0;
if (os::has_allocatable_memory_limit(&limit)) {
return limit;
}
// No limit
return SIZE_MAX;
}
size_t XAddressSpaceLimit::mark_stack() {
// Allow mark stacks to occupy 10% of the address space
const size_t limit = address_space_limit() / 10;
return align_up(limit, XMarkStackSpaceExpandSize);
}
size_t XAddressSpaceLimit::heap_view() {
// Allow all heap views to occupy 50% of the address space
const size_t limit = address_space_limit() / MaxVirtMemFraction / XHeapViews;
return align_up(limit, XGranuleSize);
}


@ -1,36 +0,0 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XADDRESSSPACELIMIT_HPP
#define SHARE_GC_X_XADDRESSSPACELIMIT_HPP
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
class XAddressSpaceLimit : public AllStatic {
public:
static size_t mark_stack();
static size_t heap_view();
};
#endif // SHARE_GC_X_XADDRESSSPACELIMIT_HPP


@ -1,85 +0,0 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XALLOCATIONFLAGS_HPP
#define SHARE_GC_X_XALLOCATIONFLAGS_HPP
#include "gc/x/xBitField.hpp"
#include "memory/allocation.hpp"
//
// Allocation flags layout
// -----------------------
//
// 7 2 1 0
// +-----+-+-+-+
// |00000|1|1|1|
// +-----+-+-+-+
// | | | |
// | | | * 0-0 Non-Blocking Flag (1-bit)
// | | |
// | | * 1-1 Worker Relocation Flag (1-bit)
// | |
// | * 2-2 Low Address Flag (1-bit)
// |
// * 7-3 Unused (5-bits)
//
class XAllocationFlags {
private:
typedef XBitField<uint8_t, bool, 0, 1> field_non_blocking;
typedef XBitField<uint8_t, bool, 1, 1> field_worker_relocation;
typedef XBitField<uint8_t, bool, 2, 1> field_low_address;
uint8_t _flags;
public:
XAllocationFlags() :
_flags(0) {}
void set_non_blocking() {
_flags |= field_non_blocking::encode(true);
}
void set_worker_relocation() {
_flags |= field_worker_relocation::encode(true);
}
void set_low_address() {
_flags |= field_low_address::encode(true);
}
bool non_blocking() const {
return field_non_blocking::decode(_flags);
}
bool worker_relocation() const {
return field_worker_relocation::decode(_flags);
}
bool low_address() const {
return field_low_address::decode(_flags);
}
};
#endif // SHARE_GC_X_XALLOCATIONFLAGS_HPP
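
XBitField, which XAllocationFlags relied on, is not part of this hunk; it is a shift-and-mask helper. A minimal standalone sketch of the same encode/decode idea (BitField here is a simplified stand-in, not the HotSpot template):

#include <cstdint>
#include <cstdio>

// Minimal stand-in for XBitField<StorageT, ValueT, Shift, Bits>: a value is
// encoded by shifting it into its field and decoded by shifting back and
// masking off the other fields.
template <typename StorageT, typename ValueT, int Shift, int Bits>
struct BitField {
  static StorageT encode(ValueT value) {
    return static_cast<StorageT>(static_cast<StorageT>(value) << Shift);
  }
  static ValueT decode(StorageT storage) {
    const StorageT mask = static_cast<StorageT>((StorageT(1) << Bits) - 1);
    return static_cast<ValueT>((storage >> Shift) & mask);
  }
};

int main() {
  using field_non_blocking      = BitField<uint8_t, bool, 0, 1>;
  using field_worker_relocation = BitField<uint8_t, bool, 1, 1>;

  uint8_t flags = 0;
  flags |= field_non_blocking::encode(true);   // set bit 0 only
  std::printf("non_blocking=%d worker_relocation=%d\n",
              (int)field_non_blocking::decode(flags),
              (int)field_worker_relocation::decode(flags));
  return 0;
}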


@ -1,129 +0,0 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xAddressSpaceLimit.hpp"
#include "gc/x/xArguments.hpp"
#include "gc/x/xCollectedHeap.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xHeuristics.hpp"
#include "gc/shared/gcArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
void XArguments::initialize_alignments() {
SpaceAlignment = XGranuleSize;
HeapAlignment = SpaceAlignment;
}
void XArguments::initialize_heap_flags_and_sizes() {
// Nothing extra to do
}
void XArguments::initialize() {
warning("Non-generational ZGC is deprecated.");
// Check mark stack size
const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack();
if (ZMarkStackSpaceLimit > mark_stack_space_limit) {
if (!FLAG_IS_DEFAULT(ZMarkStackSpaceLimit)) {
vm_exit_during_initialization("ZMarkStackSpaceLimit too large for limited address space");
}
FLAG_SET_DEFAULT(ZMarkStackSpaceLimit, mark_stack_space_limit);
}
// Enable NUMA by default
if (FLAG_IS_DEFAULT(UseNUMA)) {
FLAG_SET_DEFAULT(UseNUMA, true);
}
if (FLAG_IS_DEFAULT(ZFragmentationLimit)) {
FLAG_SET_DEFAULT(ZFragmentationLimit, 25.0);
}
// Select number of parallel threads
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
FLAG_SET_DEFAULT(ParallelGCThreads, XHeuristics::nparallel_workers());
}
if (ParallelGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
}
// Select number of concurrent threads
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
FLAG_SET_DEFAULT(ConcGCThreads, XHeuristics::nconcurrent_workers());
}
if (ConcGCThreads == 0) {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
}
// Large page size must match granule size
if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != XGranuleSize) {
vm_exit_during_initialization(err_msg("Incompatible -XX:LargePageSizeInBytes, only "
SIZE_FORMAT "M large pages are supported by ZGC",
XGranuleSize / M));
}
// The heuristics used when UseDynamicNumberOfGCThreads is
// enabled default to a ZAllocationSpikeTolerance of 1.
if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) {
FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1);
}
#ifdef COMPILER2
// Enable loop strip mining by default
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
}
}
#endif
// CompressedOops not supported
FLAG_SET_DEFAULT(UseCompressedOops, false);
// Verification before startup and after exit not (yet) supported
FLAG_SET_DEFAULT(VerifyDuringStartup, false);
FLAG_SET_DEFAULT(VerifyBeforeExit, false);
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
FLAG_SET_DEFAULT(ZVerifyRoots, true);
FLAG_SET_DEFAULT(ZVerifyObjects, true);
}
}
size_t XArguments::heap_virtual_to_physical_ratio() {
return XHeapViews * XVirtualToPhysicalRatio;
}
CollectedHeap* XArguments::create_heap() {
return new XCollectedHeap();
}
bool XArguments::is_supported() {
return is_os_supported();
}


@ -1,44 +0,0 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XARGUMENTS_HPP
#define SHARE_GC_X_XARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class XArguments : AllStatic {
public:
static void initialize_alignments();
static void initialize_heap_flags_and_sizes();
static void initialize();
static size_t heap_virtual_to_physical_ratio();
static CollectedHeap* create_heap();
static bool is_supported();
static bool is_os_supported();
};
#endif // SHARE_GC_X_XARGUMENTS_HPP


@ -1,51 +0,0 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XARRAY_HPP
#define SHARE_GC_X_XARRAY_HPP
#include "memory/allocation.hpp"
#include "utilities/growableArray.hpp"
template <typename T> using XArray = GrowableArrayCHeap<T, mtGC>;
template <typename T, bool Parallel>
class XArrayIteratorImpl : public StackObj {
private:
const T* _next;
const T* const _end;
bool next_serial(T* elem);
bool next_parallel(T* elem);
public:
XArrayIteratorImpl(const T* array, size_t length);
XArrayIteratorImpl(const XArray<T>* array);
bool next(T* elem);
};
template <typename T> using XArrayIterator = XArrayIteratorImpl<T, false /* Parallel */>;
template <typename T> using XArrayParallelIterator = XArrayIteratorImpl<T, true /* Parallel */>;
#endif // SHARE_GC_X_XARRAY_HPP


@ -1,81 +0,0 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XARRAY_INLINE_HPP
#define SHARE_GC_X_XARRAY_INLINE_HPP
#include "gc/x/xArray.hpp"
#include "runtime/atomic.hpp"
template <typename T, bool Parallel>
inline bool XArrayIteratorImpl<T, Parallel>::next_serial(T* elem) {
if (_next == _end) {
return false;
}
*elem = *_next;
_next++;
return true;
}
template <typename T, bool Parallel>
inline bool XArrayIteratorImpl<T, Parallel>::next_parallel(T* elem) {
const T* old_next = Atomic::load(&_next);
for (;;) {
if (old_next == _end) {
return false;
}
const T* const new_next = old_next + 1;
const T* const prev_next = Atomic::cmpxchg(&_next, old_next, new_next);
if (prev_next == old_next) {
*elem = *old_next;
return true;
}
old_next = prev_next;
}
}
template <typename T, bool Parallel>
inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const T* array, size_t length) :
_next(array),
_end(array + length) {}
template <typename T, bool Parallel>
inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const XArray<T>* array) :
XArrayIteratorImpl<T, Parallel>(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
template <typename T, bool Parallel>
inline bool XArrayIteratorImpl<T, Parallel>::next(T* elem) {
if (Parallel) {
return next_parallel(elem);
} else {
return next_serial(elem);
}
}
#endif // SHARE_GC_X_XARRAY_INLINE_HPP
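
The parallel flavour of this iterator hands out elements with a CAS loop so several workers can drain one array without a lock. A standalone sketch of the same claim loop using std::atomic (compare_exchange_weak reloads the expected value on failure, which plays the role of retrying with the value returned by Atomic::cmpxchg):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

// Claim-by-CAS iterator: each next() call atomically bumps a shared cursor,
// so every element is handed out to exactly one caller.
template <typename T>
class ParallelIterator {
private:
  std::atomic<const T*> _next;
  const T* const        _end;

public:
  ParallelIterator(const T* array, size_t length)
    : _next(array), _end(array + length) {}

  bool next(T* elem) {
    const T* old_next = _next.load(std::memory_order_relaxed);
    for (;;) {
      if (old_next == _end) {
        return false;                               // array drained
      }
      // On failure, old_next is refreshed with the current cursor value.
      if (_next.compare_exchange_weak(old_next, old_next + 1)) {
        *elem = *old_next;                          // we own this slot
        return true;
      }
    }
  }
};

int main() {
  std::vector<int> data(1000);
  for (int i = 0; i < 1000; i++) data[i] = i;

  ParallelIterator<int> iter(data.data(), data.size());
  std::atomic<long> sum{0};

  auto drain = [&]() {
    int v;
    long local = 0;
    while (iter.next(&v)) local += v;
    sum += local;
  };

  std::thread t1(drain), t2(drain);
  t1.join(); t2.join();
  std::printf("sum = %ld (expected %ld)\n", sum.load(), 999L * 1000 / 2);
  return 0;
}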


@ -1,54 +0,0 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XATTACHEDARRAY_HPP
#define SHARE_GC_X_XATTACHEDARRAY_HPP
#include "utilities/globalDefinitions.hpp"
class VMStructs;
template <typename ObjectT, typename ArrayT>
class XAttachedArray {
friend class ::VMStructs;
private:
const size_t _length;
static size_t object_size();
static size_t array_size(size_t length);
public:
template <typename Allocator>
static void* alloc(Allocator* allocator, size_t length);
static void* alloc(size_t length);
static void free(ObjectT* obj);
XAttachedArray(size_t length);
size_t length() const;
ArrayT* operator()(const ObjectT* obj) const;
};
#endif // SHARE_GC_X_XATTACHEDARRAY_HPP


@ -1,86 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
#define SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
#include "gc/x/xAttachedArray.hpp"
#include "memory/allocation.hpp"
#include "utilities/align.hpp"
template <typename ObjectT, typename ArrayT>
inline size_t XAttachedArray<ObjectT, ArrayT>::object_size() {
return align_up(sizeof(ObjectT), sizeof(ArrayT));
}
template <typename ObjectT, typename ArrayT>
inline size_t XAttachedArray<ObjectT, ArrayT>::array_size(size_t length) {
return sizeof(ArrayT) * length;
}
template <typename ObjectT, typename ArrayT>
template <typename Allocator>
inline void* XAttachedArray<ObjectT, ArrayT>::alloc(Allocator* allocator, size_t length) {
// Allocate memory for object and array
const size_t size = object_size() + array_size(length);
void* const addr = allocator->alloc(size);
// Placement new array
void* const array_addr = reinterpret_cast<char*>(addr) + object_size();
::new (array_addr) ArrayT[length];
// Return pointer to object
return addr;
}
template <typename ObjectT, typename ArrayT>
inline void* XAttachedArray<ObjectT, ArrayT>::alloc(size_t length) {
struct Allocator {
void* alloc(size_t size) const {
return AllocateHeap(size, mtGC);
}
} allocator;
return alloc(&allocator, length);
}
template <typename ObjectT, typename ArrayT>
inline void XAttachedArray<ObjectT, ArrayT>::free(ObjectT* obj) {
FreeHeap(obj);
}
template <typename ObjectT, typename ArrayT>
inline XAttachedArray<ObjectT, ArrayT>::XAttachedArray(size_t length) :
_length(length) {}
template <typename ObjectT, typename ArrayT>
inline size_t XAttachedArray<ObjectT, ArrayT>::length() const {
return _length;
}
template <typename ObjectT, typename ArrayT>
inline ArrayT* XAttachedArray<ObjectT, ArrayT>::operator()(const ObjectT* obj) const {
return reinterpret_cast<ArrayT*>(reinterpret_cast<uintptr_t>(obj) + object_size());
}
#endif // SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP
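The removed XAttachedArray above co-allocates a header object and a trailing array in a single heap block: the array begins at the object's size rounded up to the array element size, and operator() recomputes that offset to find the array from an object pointer. A minimal standalone sketch of the same layout arithmetic (not from the removed files; the malloc-based allocator and names are illustrative only):
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>
// Simplified model of the object-plus-trailing-array idiom: one allocation
// holds ObjectT immediately followed by `length` elements of ArrayT.
template <typename ObjectT, typename ArrayT>
struct AttachedArraySketch {
  static size_t object_size() {
    // Round the object size up to the array element size so the array
    // starts at a suitably aligned offset (mirrors align_up above).
    const size_t a = sizeof(ArrayT);
    return ((sizeof(ObjectT) + a - 1) / a) * a;
  }
  static void* alloc(size_t length) {
    // No null check for brevity; the caller placement-news ObjectT at addr.
    void* const addr = std::malloc(object_size() + sizeof(ArrayT) * length);
    // Placement-new the array just past the (aligned) object.
    ::new (static_cast<char*>(addr) + object_size()) ArrayT[length];
    return addr;
  }
  static ArrayT* array_of(const ObjectT* obj) {
    return reinterpret_cast<ArrayT*>(
        reinterpret_cast<uintptr_t>(obj) + object_size());
  }
};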

View File

@@ -1,275 +0,0 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "gc/x/xHeap.inline.hpp"
#include "gc/x/xOop.inline.hpp"
#include "gc/x/xThread.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
template <bool finalizable>
bool XBarrier::should_mark_through(uintptr_t addr) {
// Finalizable marked oops can still exist on the heap after marking
// has completed, in which case we just want to convert this into a
// good oop and not push it on the mark stack.
if (!during_mark()) {
assert(XAddress::is_marked(addr), "Should be marked");
assert(XAddress::is_finalizable(addr), "Should be finalizable");
return false;
}
// During marking, we mark through already marked oops to avoid having
// some large part of the object graph hidden behind a pushed, but not
// yet flushed, entry on a mutator mark stack. Always marking through
// allows the GC workers to proceed through the object graph even if a
// mutator touched an oop first, which in turn will reduce the risk of
// having to flush mark stacks multiple times to terminate marking.
//
// However, when doing finalizable marking we don't always want to mark
// through. First, marking through an already strongly marked oop would
// be wasteful, since we will then proceed to do finalizable marking on
// an object which is, or will be, marked strongly. Second, marking
// through an already finalizable marked oop would also be wasteful,
// since such oops can never end up on a mutator mark stack and can
// therefore not hide some part of the object graph from GC workers.
if (finalizable) {
return !XAddress::is_marked(addr);
}
// Mark through
return true;
}
template <bool gc_thread, bool follow, bool finalizable, bool publish>
uintptr_t XBarrier::mark(uintptr_t addr) {
uintptr_t good_addr;
if (XAddress::is_marked(addr)) {
// Already marked, but try to mark through anyway
good_addr = XAddress::good(addr);
} else if (XAddress::is_remapped(addr)) {
// Already remapped, but also needs to be marked
good_addr = XAddress::good(addr);
} else {
// Needs to be both remapped and marked
good_addr = remap(addr);
}
// Mark
if (should_mark_through<finalizable>(addr)) {
XHeap::heap()->mark_object<gc_thread, follow, finalizable, publish>(good_addr);
}
if (finalizable) {
// Make the oop finalizable marked/good, instead of normal marked/good.
// This is needed because an object might first become finalizable
// marked by the GC, and then loaded by a mutator thread. In this case,
// the mutator thread must be able to tell that the object needs to be
// strongly marked. The finalizable bit in the oop exists to make sure
// that a load of a finalizable marked oop will fall into the barrier
// slow path so that we can mark the object as strongly reachable.
return XAddress::finalizable_good(good_addr);
}
return good_addr;
}
uintptr_t XBarrier::remap(uintptr_t addr) {
assert(!XAddress::is_good(addr), "Should not be good");
assert(!XAddress::is_weak_good(addr), "Should not be weak good");
return XHeap::heap()->remap_object(addr);
}
uintptr_t XBarrier::relocate(uintptr_t addr) {
assert(!XAddress::is_good(addr), "Should not be good");
assert(!XAddress::is_weak_good(addr), "Should not be weak good");
return XHeap::heap()->relocate_object(addr);
}
uintptr_t XBarrier::relocate_or_mark(uintptr_t addr) {
return during_relocate() ? relocate(addr) : mark<AnyThread, Follow, Strong, Publish>(addr);
}
uintptr_t XBarrier::relocate_or_mark_no_follow(uintptr_t addr) {
return during_relocate() ? relocate(addr) : mark<AnyThread, DontFollow, Strong, Publish>(addr);
}
uintptr_t XBarrier::relocate_or_remap(uintptr_t addr) {
return during_relocate() ? relocate(addr) : remap(addr);
}
//
// Load barrier
//
uintptr_t XBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
return relocate_or_mark(addr);
}
uintptr_t XBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) {
return relocate_or_mark_no_follow(addr);
}
void XBarrier::load_barrier_on_oop_fields(oop o) {
assert(XAddress::is_good(XOop::to_address(o)), "Should be good");
XLoadBarrierOopClosure cl;
o->oop_iterate(&cl);
}
//
// Weak load barrier
//
uintptr_t XBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
return XAddress::is_weak_good(addr) ? XAddress::good(addr) : relocate_or_remap(addr);
}
uintptr_t XBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (XHeap::heap()->is_object_strongly_live(good_addr)) {
return good_addr;
}
// Not strongly live
return 0;
}
uintptr_t XBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
if (XHeap::heap()->is_object_live(good_addr)) {
return good_addr;
}
// Not live
return 0;
}
//
// Keep alive barrier
//
uintptr_t XBarrier::keep_alive_barrier_on_oop_slow_path(uintptr_t addr) {
assert(during_mark(), "Invalid phase");
// Mark
return mark<AnyThread, Follow, Strong, Overflow>(addr);
}
uintptr_t XBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(XHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
return good_addr;
}
uintptr_t XBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
assert(XHeap::heap()->is_object_live(good_addr), "Should be live");
return good_addr;
}
//
// Mark barrier
//
uintptr_t XBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
assert(during_mark(), "Invalid phase");
assert(XThread::is_worker(), "Invalid thread");
// Mark
return mark<GCThread, Follow, Strong, Overflow>(addr);
}
uintptr_t XBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
assert(during_mark(), "Invalid phase");
assert(XThread::is_worker(), "Invalid thread");
// Mark
return mark<GCThread, Follow, Finalizable, Overflow>(addr);
}
//
// Narrow oop variants, never used.
//
oop XBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
ShouldNotReachHere();
return nullptr;
}
oop XBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
ShouldNotReachHere();
}
oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere();
return nullptr;
}
#ifdef ASSERT
// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
void XBarrier::verify_on_weak(volatile oop* referent_addr) {
if (referent_addr != nullptr) {
uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset();
oop obj = cast_to_oop(base);
assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity");
}
}
#endif
void XLoadBarrierOopClosure::do_oop(oop* p) {
XBarrier::load_barrier_on_oop_field(p);
}
void XLoadBarrierOopClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
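The slow paths above all reason in terms of the address metadata ("color") bits of single-generation ZGC: an oop is marked (in one of two alternating mark colors), remapped, and possibly finalizable, and exactly one color is "good" in any given phase. A rough standalone illustration of that scheme follows; the bit positions, globals and helper names are illustrative stand-ins, not the removed xAddress/xGlobals code:
#include <cstdint>
// Toy model of the address coloring manipulated by the slow paths above.
constexpr uintptr_t Marked0      = uintptr_t(1) << 42;
constexpr uintptr_t Marked1      = uintptr_t(1) << 43;
constexpr uintptr_t Remapped     = uintptr_t(1) << 44;
constexpr uintptr_t Finalizable  = uintptr_t(1) << 45;
constexpr uintptr_t MetadataMask = Marked0 | Marked1 | Remapped | Finalizable;
// Exactly one of the three colors is "good" at a time; it flips at mark
// start (alternating Marked0/Marked1) and at relocate start (Remapped),
// which is what forces stale oops into the load-barrier slow path.
uintptr_t GoodColor = Remapped;
bool is_good(uintptr_t addr) {
  return (addr & MetadataMask) == GoodColor;
}
uintptr_t good(uintptr_t addr) {
  return (addr & ~MetadataMask) | GoodColor;
}
// Finalizable-good keeps the finalizable bit set so a later strong load
// still takes the slow path and upgrades the mark (see mark() above).
uintptr_t finalizable_good(uintptr_t addr) {
  return good(addr) | Finalizable;
}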

View File

@@ -1,135 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XBARRIER_HPP
#define SHARE_GC_X_XBARRIER_HPP
#include "memory/allStatic.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
typedef bool (*XBarrierFastPath)(uintptr_t);
typedef uintptr_t (*XBarrierSlowPath)(uintptr_t);
class XBarrier : public AllStatic {
private:
static const bool GCThread = true;
static const bool AnyThread = false;
static const bool Follow = true;
static const bool DontFollow = false;
static const bool Strong = false;
static const bool Finalizable = true;
static const bool Publish = true;
static const bool Overflow = false;
template <XBarrierFastPath fast_path> static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr);
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
static bool is_good_or_null_fast_path(uintptr_t addr);
static bool is_weak_good_or_null_fast_path(uintptr_t addr);
static bool is_marked_or_null_fast_path(uintptr_t addr);
static bool during_mark();
static bool during_relocate();
template <bool finalizable> static bool should_mark_through(uintptr_t addr);
template <bool gc_thread, bool follow, bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
static uintptr_t remap(uintptr_t addr);
static uintptr_t relocate(uintptr_t addr);
static uintptr_t relocate_or_mark(uintptr_t addr);
static uintptr_t relocate_or_mark_no_follow(uintptr_t addr);
static uintptr_t relocate_or_remap(uintptr_t addr);
static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
static void verify_on_weak(volatile oop* referent_addr) NOT_DEBUG_RETURN;
public:
// Load barrier
static oop load_barrier_on_oop(oop o);
static oop load_barrier_on_oop_field(volatile oop* p);
static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static void load_barrier_on_oop_array(volatile oop* p, size_t length);
static void load_barrier_on_oop_fields(oop o);
static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
static void load_barrier_on_root_oop_field(oop* p);
static void load_barrier_on_invisible_root_oop_field(oop* p);
// Weak load barrier
static oop weak_load_barrier_on_oop_field(volatile oop* p);
static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_weak_oop(oop o);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
static oop weak_load_barrier_on_phantom_oop(oop o);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
// Is alive barrier
static bool is_alive_barrier_on_weak_oop(oop o);
static bool is_alive_barrier_on_phantom_oop(oop o);
// Keep alive barrier
static void keep_alive_barrier_on_oop(oop o);
static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
static void keep_alive_barrier_on_phantom_root_oop_field(oop* p);
// Mark barrier
static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
// Narrow oop variants, never used.
static oop load_barrier_on_oop_field(volatile narrowOop* p);
static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
};
class XLoadBarrierOopClosure : public BasicOopIterateClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};
#endif // SHARE_GC_X_XBARRIER_HPP

View File

@@ -1,394 +0,0 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XBARRIER_INLINE_HPP
#define SHARE_GC_X_XBARRIER_INLINE_HPP
#include "gc/x/xBarrier.hpp"
#include "code/codeCache.hpp"
#include "gc/x/xAddress.inline.hpp"
#include "gc/x/xOop.inline.hpp"
#include "gc/x/xResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
// A self heal must always "upgrade" the address metadata bits in
// accordance with the metadata bits state machine, which has the
// valid state transitions as described below (where N is the GC
// cycle).
//
// Note the subtleness of overlapping GC cycles. Specifically that
// oops are colored Remapped(N) starting at relocation N and ending
// at marking N + 1.
//
// +--- Mark Start
// | +--- Mark End
// | | +--- Relocate Start
// | | | +--- Relocate End
// | | | |
// Marked |---N---|--N+1--|--N+2--|----
// Finalizable |---N---|--N+1--|--N+2--|----
// Remapped ----|---N---|--N+1--|--N+2--|
//
// VALID STATE TRANSITIONS
//
// Marked(N) -> Remapped(N)
// -> Marked(N + 1)
// -> Finalizable(N + 1)
//
// Finalizable(N) -> Marked(N)
// -> Remapped(N)
// -> Marked(N + 1)
// -> Finalizable(N + 1)
//
// Remapped(N) -> Marked(N + 1)
// -> Finalizable(N + 1)
//
// PHASE VIEW
//
// XPhaseMark
// Load & Mark
// Marked(N) <- Marked(N - 1)
// <- Finalizable(N - 1)
// <- Remapped(N - 1)
// <- Finalizable(N)
//
// Mark(Finalizable)
// Finalizable(N) <- Marked(N - 1)
// <- Finalizable(N - 1)
// <- Remapped(N - 1)
//
// Load(AS_NO_KEEPALIVE)
// Remapped(N - 1) <- Marked(N - 1)
// <- Finalizable(N - 1)
//
// XPhaseMarkCompleted (Resurrection blocked)
// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive
// Marked(N) <- Marked(N - 1)
// <- Finalizable(N - 1)
// <- Remapped(N - 1)
// <- Finalizable(N)
//
// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE)
// Remapped(N - 1) <- Marked(N - 1)
// <- Finalizable(N - 1)
//
// XPhaseMarkCompleted (Resurrection unblocked)
// Load
// Marked(N) <- Finalizable(N)
//
// XPhaseRelocate
// Load & Load(AS_NO_KEEPALIVE)
// Remapped(N) <- Marked(N)
// <- Finalizable(N)
template <XBarrierFastPath fast_path>
inline void XBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
if (heal_addr == 0) {
// Never heal with null since it interacts badly with reference processing.
// A mutator clearing an oop would be similar to calling Reference.clear(),
// which would make the reference non-discoverable or silently dropped
// by the reference processor.
return;
}
assert(!fast_path(addr), "Invalid self heal");
assert(fast_path(heal_addr), "Invalid self heal");
for (;;) {
// Heal
const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr, memory_order_relaxed);
if (prev_addr == addr) {
// Success
return;
}
if (fast_path(prev_addr)) {
// Must not self heal
return;
}
// The oop location was healed by another barrier, but still needs upgrading.
// Re-apply healing to make sure the oop is not left with weaker (remapped or
// finalizable) metadata bits than what this barrier tried to apply.
assert(XAddress::offset(prev_addr) == XAddress::offset(heal_addr), "Invalid offset");
addr = prev_addr;
}
}
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
inline oop XBarrier::barrier(volatile oop* p, oop o) {
const uintptr_t addr = XOop::to_address(o);
// Fast path
if (fast_path(addr)) {
return XOop::from_address(addr);
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
if (p != nullptr) {
self_heal<fast_path>(p, addr, good_addr);
}
return XOop::from_address(good_addr);
}
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
inline oop XBarrier::weak_barrier(volatile oop* p, oop o) {
const uintptr_t addr = XOop::to_address(o);
// Fast path
if (fast_path(addr)) {
// Return the good address instead of the weak good address
// to ensure that the currently active heap view is used.
return XOop::from_address(XAddress::good_or_null(addr));
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
if (p != nullptr) {
// The slow path returns a good/marked address or null, but we never mark
// oops in a weak load barrier so we always heal with the remapped address.
self_heal<fast_path>(p, addr, XAddress::remapped_or_null(good_addr));
}
return XOop::from_address(good_addr);
}
template <XBarrierFastPath fast_path, XBarrierSlowPath slow_path>
inline void XBarrier::root_barrier(oop* p, oop o) {
const uintptr_t addr = XOop::to_address(o);
// Fast path
if (fast_path(addr)) {
return;
}
// Slow path
const uintptr_t good_addr = slow_path(addr);
// Non-atomic healing helps speed up root scanning. This is safe to do
// since we are always healing roots in a safepoint, or under a lock,
// which ensures we are never racing with mutators modifying roots while
// we are healing them. It's also safe in case multiple GC threads try
// to heal the same root if it is aligned, since they would always heal
// the root in the same way and it does not matter in which order it
// happens. For misaligned oops, there needs to be mutual exclusion.
*p = XOop::from_address(good_addr);
}
inline bool XBarrier::is_good_or_null_fast_path(uintptr_t addr) {
return XAddress::is_good_or_null(addr);
}
inline bool XBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
return XAddress::is_weak_good_or_null(addr);
}
inline bool XBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
return XAddress::is_marked_or_null(addr);
}
inline bool XBarrier::during_mark() {
return XGlobalPhase == XPhaseMark;
}
inline bool XBarrier::during_relocate() {
return XGlobalPhase == XPhaseRelocate;
}
//
// Load barrier
//
inline oop XBarrier::load_barrier_on_oop(oop o) {
return load_barrier_on_oop_field_preloaded((oop*)nullptr, o);
}
inline oop XBarrier::load_barrier_on_oop_field(volatile oop* p) {
const oop o = Atomic::load(p);
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop XBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
inline void XBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
for (volatile const oop* const end = p + length; p < end; p++) {
load_barrier_on_oop_field(p);
}
}
inline oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
verify_on_weak(p);
if (XResurrection::is_blocked()) {
return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
inline oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (XResurrection::is_blocked()) {
return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return load_barrier_on_oop_field_preloaded(p, o);
}
inline void XBarrier::load_barrier_on_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
inline void XBarrier::load_barrier_on_invisible_root_oop_field(oop* p) {
const oop o = *p;
root_barrier<is_good_or_null_fast_path, load_barrier_on_invisible_root_oop_slow_path>(p, o);
}
//
// Weak load barrier
//
inline oop XBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
assert(!XResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
const oop o = Atomic::load(p);
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
inline oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
inline oop XBarrier::weak_load_barrier_on_weak_oop(oop o) {
return weak_load_barrier_on_weak_oop_field_preloaded((oop*)nullptr, o);
}
inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
verify_on_weak(p);
if (XResurrection::is_blocked()) {
return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
inline oop XBarrier::weak_load_barrier_on_phantom_oop(oop o) {
return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)nullptr, o);
}
inline oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
if (XResurrection::is_blocked()) {
return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
return weak_load_barrier_on_oop_field_preloaded(p, o);
}
//
// Is alive barrier
//
inline bool XBarrier::is_alive_barrier_on_weak_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(XResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_weak_oop(o) != nullptr;
}
inline bool XBarrier::is_alive_barrier_on_phantom_oop(oop o) {
// Check if oop is logically non-null. This operation
// is only valid when resurrection is blocked.
assert(XResurrection::is_blocked(), "Invalid phase");
return weak_load_barrier_on_phantom_oop(o) != nullptr;
}
//
// Keep alive barrier
//
inline void XBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
const oop o = Atomic::load(p);
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}
inline void XBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked");
const oop o = Atomic::load(p);
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}
inline void XBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
// The keep alive operation is only valid when resurrection is blocked.
//
// Except with Loom, where we intentionally arm nmethods after
// unlinking, to get a sense of what nmethods are alive. This will trigger
// the keep alive barriers, but the oops are healed and the slow-paths
// will not trigger. We have stronger checks in the slow-paths.
assert(XResurrection::is_blocked() || (CodeCache::contains((void*)p)),
"This operation is only valid when resurrection is blocked");
const oop o = *p;
root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}
inline void XBarrier::keep_alive_barrier_on_oop(oop o) {
const uintptr_t addr = XOop::to_address(o);
assert(XAddress::is_good(addr), "Invalid address");
if (during_mark()) {
keep_alive_barrier_on_oop_slow_path(addr);
}
}
//
// Mark barrier
//
inline void XBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
const oop o = Atomic::load(p);
if (finalizable) {
barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
} else {
const uintptr_t addr = XOop::to_address(o);
if (XAddress::is_good(addr)) {
// Mark through good oop
mark_barrier_on_oop_slow_path(addr);
} else {
// Mark through bad oop
barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
}
}
}
inline void XBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
for (volatile const oop* const end = p + length; p < end; p++) {
mark_barrier_on_oop_field(p, finalizable);
}
}
#endif // SHARE_GC_X_XBARRIER_INLINE_HPP
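Stripped of the metadata specifics, barrier() and self_heal() above follow one pattern: a cheap fast-path predicate on the loaded value, a slow path that computes the good address, and a relaxed compare-exchange that heals the field so subsequent loads stay on the fast path. A hedged generic sketch of that shape (the predicates are placeholders; the real fast and slow paths are the XBarrierFastPath/XBarrierSlowPath template parameters declared in xBarrier.hpp):
#include <atomic>
#include <cstdint>
using FastPath = bool (*)(uintptr_t);
using SlowPath = uintptr_t (*)(uintptr_t);
// Generic fast-path / slow-path / self-heal shape of the load barrier.
template <FastPath fast_path, SlowPath slow_path>
uintptr_t load_barrier(std::atomic<uintptr_t>* field) {
  uintptr_t addr = field->load(std::memory_order_relaxed);
  if (fast_path(addr)) {
    return addr;                           // already good, common case
  }
  const uintptr_t good = slow_path(addr);
  if (good != 0) {                         // never self-heal with null
    // Keep retrying while the field still holds a value that needs
    // upgrading; stop if another barrier already healed it to a good value.
    while (!field->compare_exchange_weak(addr, good,
                                         std::memory_order_relaxed)) {
      if (fast_path(addr)) {
        break;
      }
    }
  }
  return good;
}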

View File

@@ -1,99 +0,0 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xBarrierSet.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xBarrierSetNMethod.hpp"
#include "gc/x/xBarrierSetStackChunk.hpp"
#include "gc/x/xGlobals.hpp"
#include "gc/x/xHeap.inline.hpp"
#include "gc/x/xStackWatermark.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "gc/x/c1/xBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/x/c2/xBarrierSetC2.hpp"
#endif
class XBarrierSetC1;
class XBarrierSetC2;
XBarrierSet::XBarrierSet() :
BarrierSet(make_barrier_set_assembler<XBarrierSetAssembler>(),
make_barrier_set_c1<XBarrierSetC1>(),
make_barrier_set_c2<XBarrierSetC2>(),
new XBarrierSetNMethod(),
new XBarrierSetStackChunk(),
BarrierSet::FakeRtti(BarrierSet::XBarrierSet)) {}
XBarrierSetAssembler* XBarrierSet::assembler() {
BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
return reinterpret_cast<XBarrierSetAssembler*>(bsa);
}
bool XBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
assert((decorators & AS_RAW) == 0, "Unexpected decorator");
//assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
if (is_reference_type(type)) {
assert((decorators & (IN_HEAP | IN_NATIVE)) != 0, "Where is reference?");
// Barrier needed even when IN_NATIVE, to allow concurrent scanning.
return true;
}
// Barrier not needed
return false;
}
void XBarrierSet::on_thread_create(Thread* thread) {
// Create thread local data
XThreadLocalData::create(thread);
}
void XBarrierSet::on_thread_destroy(Thread* thread) {
// Destroy thread local data
XThreadLocalData::destroy(thread);
}
void XBarrierSet::on_thread_attach(Thread* thread) {
// Set thread local address bad mask
XThreadLocalData::set_address_bad_mask(thread, XAddressBadMask);
if (thread->is_Java_thread()) {
JavaThread* const jt = JavaThread::cast(thread);
StackWatermark* const watermark = new XStackWatermark(jt);
StackWatermarkSet::add_watermark(jt, watermark);
}
}
void XBarrierSet::on_thread_detach(Thread* thread) {
// Flush and free any remaining mark stacks
XHeap::heap()->mark_flush_and_free(thread);
}
void XBarrierSet::print_on(outputStream* st) const {
st->print_cr("XBarrierSet");
}

View File

@@ -1,109 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XBARRIERSET_HPP
#define SHARE_GC_X_XBARRIERSET_HPP
#include "gc/shared/barrierSet.hpp"
class XBarrierSetAssembler;
class XBarrierSet : public BarrierSet {
public:
XBarrierSet();
static XBarrierSetAssembler* assembler();
static bool barrier_needed(DecoratorSet decorators, BasicType type);
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);
virtual void on_thread_attach(Thread* thread);
virtual void on_thread_detach(Thread* thread);
virtual void print_on(outputStream* st) const;
template <DecoratorSet decorators, typename BarrierSetT = XBarrierSet>
class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
private:
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
template <DecoratorSet expected>
static void verify_decorators_present();
template <DecoratorSet expected>
static void verify_decorators_absent();
static oop* field_addr(oop base, ptrdiff_t offset);
template <typename T>
static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
template <typename T>
static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
public:
//
// In heap
//
template <typename T>
static oop oop_load_in_heap(T* addr);
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
template <typename T>
static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);
template <typename T>
static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length);
static void clone_in_heap(oop src, oop dst, size_t size);
//
// Not in heap
//
template <typename T>
static oop oop_load_not_in_heap(T* addr);
template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);
template <typename T>
static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
};
};
template<> struct BarrierSet::GetName<XBarrierSet> {
static const BarrierSet::Name value = BarrierSet::XBarrierSet;
};
template<> struct BarrierSet::GetType<BarrierSet::XBarrierSet> {
typedef ::XBarrierSet type;
};
#endif // SHARE_GC_X_XBARRIERSET_HPP

View File

@@ -1,242 +0,0 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XBARRIERSET_INLINE_HPP
#define SHARE_GC_X_XBARRIERSET_INLINE_HPP
#include "gc/x/xBarrierSet.hpp"
#include "gc/shared/accessBarrierSupport.inline.hpp"
#include "gc/x/xBarrier.inline.hpp"
#include "utilities/debug.hpp"
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
if ((decorators & expected) == 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <DecoratorSet expected>
inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
if ((decorators & expected) != 0) {
fatal("Using unsupported access decorators");
}
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop* XBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
assert(base != nullptr, "Invalid base");
return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
assert((HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value), "Must be");
return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
return XBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
assert((HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value), "Must be");
return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) {
verify_decorators_present<ON_UNKNOWN_OOP_REF>();
const DecoratorSet decorators_known_strength =
AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be");
return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
} else {
if (decorators_known_strength & ON_STRONG_OOP_REF) {
return XBarrier::load_barrier_on_oop_field_preloaded(addr, o);
} else if (decorators_known_strength & ON_WEAK_OOP_REF) {
return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
} else {
assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be");
return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
}
}
}
//
// In heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
const oop o = Raw::oop_load_in_heap(addr);
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
oop* const addr = field_addr(base, offset);
const oop o = Raw::oop_load_in_heap(addr);
if (HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o);
}
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
XBarrier::load_barrier_on_oop_field(addr);
return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
// Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
// calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.
XBarrier::load_barrier_on_oop_field(field_addr(base, offset));
return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value);
return XBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value);
return XBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length) {
T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// No check cast, bulk barrier and bulk copy
XBarrier::load_barrier_on_oop_array(src, length);
return Raw::oop_arraycopy_in_heap(nullptr, 0, src, nullptr, 0, dst, length);
}
// Check cast and copy each element
Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
for (const T* const end = src + length; src < end; src++, dst++) {
const oop elem = XBarrier::load_barrier_on_oop_field(src);
if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) {
// Check cast failed
return false;
}
// Cast is safe, since we know it's never a narrowOop
*(oop*)dst = elem;
}
return true;
}
template <DecoratorSet decorators, typename BarrierSetT>
inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
XBarrier::load_barrier_on_oop_fields(src);
Raw::clone_in_heap(src, dst, size);
}
//
// Not in heap
//
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
const oop o = Raw::oop_load_not_in_heap(addr);
return load_barrier_on_oop_field_preloaded(addr, o);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
return Raw::oop_atomic_xchg_not_in_heap(addr, new_value);
}
#endif // SHARE_GC_X_XBARRIERSET_INLINE_HPP

View File

@@ -1,35 +0,0 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/x/xBarrierSetAssembler.hpp"
#include "gc/x/xThreadLocalData.hpp"
#include "runtime/javaThread.hpp"
Address XBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) {
return Address(thread, XThreadLocalData::address_bad_mask_offset());
}
Address XBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) {
return Address(env, XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset());
}
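address_bad_mask_from_jni_env() above relies on the JNIEnv handed to native code being a field embedded in the JavaThread: subtracting the env field's offset from the env pointer recovers the thread base, so a single displacement off the env register reaches the thread-local bad mask. A small self-contained check of that arithmetic (the struct and field names are stand-ins, not the real JavaThread layout):
#include <cassert>
#include <cstddef>
#include <cstdint>
// Stand-in for a JavaThread that embeds both the JNI environment field and
// the thread-local ZGC bad mask.
struct FakeJavaThread {
  uintptr_t unrelated_state[4];
  void*     jni_environment;   // what native code receives as JNIEnv*
  uintptr_t address_bad_mask;  // thread-local load-barrier bad mask
};
int main() {
  FakeJavaThread t{};
  char* const thread = reinterpret_cast<char*>(&t);
  char* const env    = thread + offsetof(FakeJavaThread, jni_environment);
  // Displacement used when only the env pointer is in a register:
  // bad_mask_offset - jni_environment_offset.
  const std::ptrdiff_t disp =
      static_cast<std::ptrdiff_t>(offsetof(FakeJavaThread, address_bad_mask)) -
      static_cast<std::ptrdiff_t>(offsetof(FakeJavaThread, jni_environment));
  assert(env + disp == thread + offsetof(FakeJavaThread, address_bad_mask));
  return 0;
}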

View File

@@ -1,39 +0,0 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_X_XBARRIERSETASSEMBLER_HPP
#define SHARE_GC_X_XBARRIERSETASSEMBLER_HPP
#include "gc/shared/barrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class XBarrierSetAssemblerBase : public BarrierSetAssembler {
public:
static Address address_bad_mask_from_thread(Register thread);
static Address address_bad_mask_from_jni_env(Register env);
};
// Needs to be included after definition of XBarrierSetAssemblerBase
#include CPU_HEADER(gc/x/xBarrierSetAssembler)
#endif // SHARE_GC_X_XBARRIERSETASSEMBLER_HPP

Some files were not shown because too many files have changed in this diff.