8330685: ZGC: share barrier spilling logic

Reviewed-by: eosterlund, mdoerr, fyang, aboldtch
This commit is contained in:
Roberto Castañeda Lozano 2024-04-29 08:41:59 +00:00
parent 76cda7b895
commit 549bc6a039
26 changed files with 1077 additions and 854 deletions

View File

@ -1126,10 +1126,6 @@ extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
class CallStubImpl {
//--------------------------------------------------------------

View File

@ -34,6 +34,10 @@
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "code/vmreg.inline.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2
#define __ masm->
@ -419,3 +423,204 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register
__ load_klass(obj, obj); // get klass
__ cbz(obj, error); // if klass is null it is broken
}
#ifdef COMPILER2
// Encode the operand size of a floating-point/vector register into the low
// two bits of its OptoReg encoding, so the barrier stub's live-register mask
// can later recover how many slots the register actually occupies (decoded
// by SaveLiveRegisters::decode_float_vector_register_size).
OptoReg::Name BarrierSetAssembler::encode_float_vector_register_size(const Node* node, OptoReg::Name opto_reg) {
  switch (node->ideal_reg()) {
    case Op_RegF:
      // No need to refine. The original encoding is already fine to distinguish.
      assert(opto_reg % 4 == 0, "Float register should only occupy a single slot");
      break;
    // Use different encoding values of the same fp/vector register to help distinguish different sizes.
    // Such as V16. The OptoReg::name and its corresponding slot value are
    // "V16": 64, "V16_H": 65, "V16_J": 66, "V16_K": 67.
    case Op_RegD:
    case Op_VecD:
      // Double / 64-bit vector: low bits = 1.
      opto_reg &= ~3;
      opto_reg |= 1;
      break;
    case Op_VecX:
      // 128-bit vector: low bits = 2.
      opto_reg &= ~3;
      opto_reg |= 2;
      break;
    case Op_VecA:
      // Scalable (SVE) vector: low bits = 3.
      opto_reg &= ~3;
      opto_reg |= 3;
      break;
    default:
      assert(false, "unexpected ideal register");
      ShouldNotReachHere();
  }
  return opto_reg;
}
// Refine the OptoReg encoding of a register live across a barrier stub.
// Stack slots are reported as OptoReg::Bad; floating-point/vector registers
// get their operand size folded into the low bits of the encoding.
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }
  if (OptoReg::as_VMReg(opto_reg)->is_FloatRegister()) {
    return encode_float_vector_register_size(node, opto_reg);
  }
  return opto_reg;
}
#undef __
#define __ _masm->
// Collect the registers live across the stub's runtime call into the
// per-class sets (_gp_regs, _fp_regs, _neon_regs, _sve_regs, _p_regs),
// then drop the registers that do not need saving (C-ABI saved-on-entry
// registers, scratch registers, and the stub's result register).
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
  int index = -1;
  GrowableArray<RegisterData> registers;
  VMReg prev_vm_reg = VMRegImpl::Bad();
  RegMaskIterator rmi(stub->live());

  // Pass 1: coalesce the live-mask slots into (register, slot-count) pairs.
  while (rmi.has_next()) {
    OptoReg::Name opto_reg = rmi.next();
    VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

    if (vm_reg->is_Register()) {
      // GPR may have one or two slots in regmask
      // Determine whether the current vm_reg is the same physical register as the previous one
      if (is_same_register(vm_reg, prev_vm_reg)) {
        registers.at(index)._slots++;
      } else {
        RegisterData reg_data = { vm_reg, 1 };
        index = registers.append(reg_data);
      }
    } else if (vm_reg->is_FloatRegister()) {
      // We have size encoding in OptoReg of stub->live()
      // After encoding, float/neon/sve register has only one slot in regmask
      // Decode it to get the actual size
      VMReg vm_reg_base = vm_reg->as_FloatRegister()->as_VMReg();
      int slots = decode_float_vector_register_size(opto_reg);
      RegisterData reg_data = { vm_reg_base, slots };
      index = registers.append(reg_data);
    } else if (vm_reg->is_PRegister()) {
      // PRegister has only one slot in regmask
      RegisterData reg_data = { vm_reg, 1 };
      index = registers.append(reg_data);
    } else {
      assert(false, "Unknown register type");
      ShouldNotReachHere();
    }
    prev_vm_reg = vm_reg;
  }

  // Record registers that needs to be saved/restored
  // Pass 2: dispatch each register into the set matching its save size.
  for (GrowableArrayIterator<RegisterData> it = registers.begin(); it != registers.end(); ++it) {
    RegisterData reg_data = *it;
    VMReg vm_reg = reg_data._reg;
    int slots = reg_data._slots;
    if (vm_reg->is_Register()) {
      assert(slots == 1 || slots == 2, "Unexpected register save size");
      _gp_regs += RegSet::of(vm_reg->as_Register());
    } else if (vm_reg->is_FloatRegister()) {
      if (slots == 1 || slots == 2) {
        // Scalar float/double (1 or 2 slots).
        _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else if (slots == 4) {
        // 128-bit NEON vector (4 slots).
        _neon_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else {
        // Anything else must be a scalable SVE vector.
        assert(slots == Matcher::scalable_vector_reg_size(T_FLOAT), "Unexpected register save size");
        _sve_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      }
    } else {
      assert(vm_reg->is_PRegister() && slots == 1, "Unknown register type");
      _p_regs += PRegSet::of(vm_reg->as_PRegister());
    }
  }

  // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
  if (stub->result() != noreg) {
    _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->result());
  } else {
    _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);
  }

  // Remove C-ABI SOE fp registers
  _fp_regs -= FloatRegSet::range(v8, v15);
}
// Map a VMReg onto its high-level register class (see enum RC).
enum RC SaveLiveRegisters::rc_class(VMReg reg) {
  if (reg->is_reg()) {
    if (reg->is_Register())      return rc_int;
    if (reg->is_FloatRegister()) return rc_float;
    if (reg->is_PRegister())     return rc_predicate;
  }
  return reg->is_stack() ? rc_stack : rc_bad;
}
// Whether reg1 and reg2 denote the same physical register, even if they
// refer to different slots of it.
bool SaveLiveRegisters::is_same_register(VMReg reg1, VMReg reg2) {
  if (reg1 == reg2) {
    return true;
  }
  if (rc_class(reg1) != rc_class(reg2)) {
    return false;
  }
  if (reg1->is_Register()) {
    return reg1->as_Register() == reg2->as_Register();
  }
  if (reg1->is_FloatRegister()) {
    return reg1->as_FloatRegister() == reg2->as_FloatRegister();
  }
  if (reg1->is_PRegister()) {
    return reg1->as_PRegister() == reg2->as_PRegister();
  }
  return false;
}
// Recover the slot count of a floating-point/vector register from the size
// encoding stored in the low two bits of its OptoReg (the inverse of
// BarrierSetAssembler::encode_float_vector_register_size).
int SaveLiveRegisters::decode_float_vector_register_size(OptoReg::Name opto_reg) {
  const int size_encoding = opto_reg & 3;
  if (size_encoding == 0) {
    return 1; // single-precision float
  }
  if (size_encoding == 1) {
    return 2; // double / 64-bit vector
  }
  if (size_encoding == 2) {
    return 4; // 128-bit NEON vector
  }
  // Only remaining encoding (3): scalable SVE vector.
  assert(size_encoding == 3, "invariant");
  return Matcher::scalable_vector_reg_size(T_FLOAT);
}
// Push all registers that must survive the stub's runtime call. The
// destructor pops them in the exact reverse order.
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
  : _masm(masm),
    _gp_regs(),
    _fp_regs(),
    _neon_regs(),
    _sve_regs(),
    _p_regs() {
  // Figure out what registers to save/restore
  initialize(stub);

  // Save registers
  __ push(_gp_regs, sp);
  __ push_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
  __ push_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ push_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ push_p(_p_regs, sp);
}
// Restore the registers saved by the constructor, popping in reverse order
// of the pushes.
SaveLiveRegisters::~SaveLiveRegisters() {
  // Restore registers
  __ pop_p(_p_regs, sp);
  __ pop_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ pop_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ pop_fp(_fp_regs, sp, MacroAssembler::PushPopFp);

  // External runtime call may clobber ptrue reg
  __ reinitialize_ptrue();

  __ pop(_gp_regs, sp);
}
#endif // COMPILER2

View File

@ -30,6 +30,12 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
class BarrierStubC2;
class Node;
#endif // COMPILER2
enum class NMethodPatchingType {
stw_instruction_and_data_patch,
@ -129,6 +135,54 @@ public:
static address patching_epoch_addr();
static void clear_patching_epoch();
static void increment_patching_epoch();
#ifdef COMPILER2
OptoReg::Name encode_float_vector_register_size(const Node* node,
OptoReg::Name opto_reg);
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
#endif // COMPILER2
};
#ifdef COMPILER2
// This class saves and restores the registers that need to be preserved across
// the runtime call represented by a given C2 barrier stub. Use as follows:
// {
// SaveLiveRegisters save(masm, stub);
// ..
// __ blr(...);
// ..
// }
class SaveLiveRegisters {
private:
  // A physical register together with the number of stack slots it occupies
  // once pushed onto the stack.
  struct RegisterData {
    VMReg _reg;
    int _slots; // slots occupied once pushed into stack

    // Used by GrowableArray::find()
    bool operator == (const RegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const _masm;
  RegSet _gp_regs;        // general-purpose registers to preserve
  FloatRegSet _fp_regs;   // scalar float/double registers to preserve
  FloatRegSet _neon_regs; // 128-bit NEON vector registers to preserve
  FloatRegSet _sve_regs;  // scalable SVE vector registers to preserve
  PRegSet _p_regs;        // SVE predicate registers to preserve

  // Map a VMReg onto its high-level register class.
  static enum RC rc_class(VMReg reg);
  // Whether two VMRegs denote the same physical register.
  static bool is_same_register(VMReg reg1, VMReg reg2);
  // Recover a fp/vector register's slot count from its OptoReg size encoding.
  static int decode_float_vector_register_size(OptoReg::Name opto_reg);

public:
  // Populate the register sets from the stub's live-register mask.
  void initialize(BarrierStubC2* stub);
  SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub);
  ~SaveLiveRegisters();
};
#endif // COMPILER2
#endif // CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP

View File

@ -1081,224 +1081,6 @@ void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler*
#ifdef COMPILER2
OptoReg::Name ZBarrierSetAssembler::encode_float_vector_register_size(const Node* node, OptoReg::Name opto_reg) {
switch (node->ideal_reg()) {
case Op_RegF:
// No need to refine. The original encoding is already fine to distinguish.
assert(opto_reg % 4 == 0, "Float register should only occupy a single slot");
break;
// Use different encoding values of the same fp/vector register to help distinguish different sizes.
// Such as V16. The OptoReg::name and its corresponding slot value are
// "V16": 64, "V16_H": 65, "V16_J": 66, "V16_K": 67.
case Op_RegD:
case Op_VecD:
opto_reg &= ~3;
opto_reg |= 1;
break;
case Op_VecX:
opto_reg &= ~3;
opto_reg |= 2;
break;
case Op_VecA:
opto_reg &= ~3;
opto_reg |= 3;
break;
default:
assert(false, "unexpected ideal register");
ShouldNotReachHere();
}
return opto_reg;
}
OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_FloatRegister()) {
opto_reg = encode_float_vector_register_size(node, opto_reg);
}
return opto_reg;
}
#undef __
#define __ _masm->
class ZSaveLiveRegisters {
private:
struct RegisterData {
VMReg _reg;
int _slots; // slots occupied once pushed into stack
// Used by GrowableArray::find()
bool operator == (const RegisterData& other) {
return _reg == other._reg;
}
};
MacroAssembler* const _masm;
RegSet _gp_regs;
FloatRegSet _fp_regs;
FloatRegSet _neon_regs;
FloatRegSet _sve_regs;
PRegSet _p_regs;
public:
void initialize(ZBarrierStubC2* stub) {
int index = -1;
GrowableArray<RegisterData> registers;
VMReg prev_vm_reg = VMRegImpl::Bad();
RegMaskIterator rmi(stub->live());
while (rmi.has_next()) {
OptoReg::Name opto_reg = rmi.next();
VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
// GPR may have one or two slots in regmask
// Determine whether the current vm_reg is the same physical register as the previous one
if (is_same_register(vm_reg, prev_vm_reg)) {
registers.at(index)._slots++;
} else {
RegisterData reg_data = { vm_reg, 1 };
index = registers.append(reg_data);
}
} else if (vm_reg->is_FloatRegister()) {
// We have size encoding in OptoReg of stub->live()
// After encoding, float/neon/sve register has only one slot in regmask
// Decode it to get the actual size
VMReg vm_reg_base = vm_reg->as_FloatRegister()->as_VMReg();
int slots = decode_float_vector_register_size(opto_reg);
RegisterData reg_data = { vm_reg_base, slots };
index = registers.append(reg_data);
} else if (vm_reg->is_PRegister()) {
// PRegister has only one slot in regmask
RegisterData reg_data = { vm_reg, 1 };
index = registers.append(reg_data);
} else {
assert(false, "Unknown register type");
ShouldNotReachHere();
}
prev_vm_reg = vm_reg;
}
// Record registers that needs to be saved/restored
for (GrowableArrayIterator<RegisterData> it = registers.begin(); it != registers.end(); ++it) {
RegisterData reg_data = *it;
VMReg vm_reg = reg_data._reg;
int slots = reg_data._slots;
if (vm_reg->is_Register()) {
assert(slots == 1 || slots == 2, "Unexpected register save size");
_gp_regs += RegSet::of(vm_reg->as_Register());
} else if (vm_reg->is_FloatRegister()) {
if (slots == 1 || slots == 2) {
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
} else if (slots == 4) {
_neon_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
} else {
assert(slots == Matcher::scalable_vector_reg_size(T_FLOAT), "Unexpected register save size");
_sve_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
}
} else {
assert(vm_reg->is_PRegister() && slots == 1, "Unknown register type");
_p_regs += PRegSet::of(vm_reg->as_PRegister());
}
}
// Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
if (stub->result() != noreg) {
_gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->result());
} else {
_gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);
}
// Remove C-ABI SOE fp registers
_fp_regs -= FloatRegSet::range(v8, v15);
}
static enum RC rc_class(VMReg reg) {
if (reg->is_reg()) {
if (reg->is_Register()) {
return rc_int;
} else if (reg->is_FloatRegister()) {
return rc_float;
} else if (reg->is_PRegister()) {
return rc_predicate;
}
}
if (reg->is_stack()) {
return rc_stack;
}
return rc_bad;
}
static bool is_same_register(VMReg reg1, VMReg reg2) {
if (reg1 == reg2) {
return true;
}
if (rc_class(reg1) == rc_class(reg2)) {
if (reg1->is_Register()) {
return reg1->as_Register() == reg2->as_Register();
} else if (reg1->is_FloatRegister()) {
return reg1->as_FloatRegister() == reg2->as_FloatRegister();
} else if (reg1->is_PRegister()) {
return reg1->as_PRegister() == reg2->as_PRegister();
}
}
return false;
}
static int decode_float_vector_register_size(OptoReg::Name opto_reg) {
switch (opto_reg & 3) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return Matcher::scalable_vector_reg_size(T_FLOAT);
default:
ShouldNotReachHere();
return 0;
}
}
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_regs(),
_fp_regs(),
_neon_regs(),
_sve_regs(),
_p_regs() {
// Figure out what registers to save/restore
initialize(stub);
// Save registers
__ push(_gp_regs, sp);
__ push_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
__ push_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
__ push_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
__ push_p(_p_regs, sp);
}
~ZSaveLiveRegisters() {
// Restore registers
__ pop_p(_p_regs, sp);
__ pop_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
__ pop_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
__ pop_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
// External runtime call may clobber ptrue reg
__ reinitialize_ptrue();
__ pop(_gp_regs, sp);
}
};
#undef __
#define __ _masm->
@ -1368,7 +1150,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
}
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
ZSetupArguments setup_arguments(masm, stub);
__ mov(rscratch1, stub->slow_path());
__ blr(rscratch1);
@ -1400,7 +1182,7 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ bind(slow);
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
__ lea(c_rarg0, stub->ref_addr());
if (stub->is_native()) {

View File

@ -187,12 +187,6 @@ public:
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name encode_float_vector_register_size(const Node* node,
OptoReg::Name opto_reg);
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,

View File

@ -403,4 +403,7 @@ inline Register as_Register(FloatRegister reg) {
return as_Register(reg->encoding());
}
// High-level register class of an OptoReg or a VMReg register.
enum RC { rc_bad, rc_int, rc_float, rc_predicate, rc_stack };
#endif // CPU_AARCH64_REGISTER_AARCH64_HPP

View File

@ -242,3 +242,11 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
__ bind(skip);
__ block_comment("nmethod_barrier end");
}
#ifdef COMPILER2
// Late barrier expansion is not supported on this platform; any attempt to
// use it stops the VM via Unimplemented().
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  Unimplemented(); // This must be implemented to support late barrier expansion.
  // Unreachable: satisfy the non-void return type and silence
  // missing-return warnings on toolchains that do not treat
  // Unimplemented() as noreturn.
  return OptoReg::Bad;
}
#endif // COMPILER2

View File

@ -28,6 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "code/vmreg.hpp"
#include "opto/optoreg.hpp"
class Node;
#endif // COMPILER2
enum class NMethodPatchingType {
stw_instruction_and_data_patch,
@ -62,6 +68,11 @@ public:
virtual void barrier_stubs_init() {}
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; }
virtual void nmethod_entry_barrier(MacroAssembler* masm);
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
#endif // COMPILER2
};
#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP

View File

@ -34,6 +34,9 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER2
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2
#define __ masm->
@ -259,3 +262,119 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
void BarrierSetAssembler::check_oop(MacroAssembler *masm, Register oop, const char* msg) {
__ verify_oop(oop, msg);
}
#ifdef COMPILER2
// Refine the OptoReg encoding of a register live across a barrier stub.
// Stack slots are reported as OptoReg::Bad, as are the odd-numbered slots
// of general-purpose and floating-point registers (only the even base slot
// is kept).
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }
  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  const bool gp_or_fp = vm_reg->is_Register() || vm_reg->is_FloatRegister();
  const bool odd_slot = (opto_reg & 1) != 0;
  return (gp_or_fp && odd_slot) ? OptoReg::Bad : opto_reg;
}
#undef __
#define __ _masm->
// Compute the spill area size, push an ABI frame and save the live
// registers before the stub's runtime call.
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler *masm, BarrierStubC2 *stub)
  : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) {

  // First pass only counts the registers, so we know how big a frame to push.
  const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
  _frame_size = align_up(register_save_size, frame::alignment_in_bytes)
                + frame::native_abi_reg_args_size;

  __ save_LR_CR(R0);
  __ push_frame(_frame_size, R0);
  // Second pass actually spills the registers into the new frame.
  iterate_over_register_mask(ACTION_SAVE, _frame_size);
}
// Reload the saved registers, tear down the spill frame and restore LR/CR.
SaveLiveRegisters::~SaveLiveRegisters() {
  iterate_over_register_mask(ACTION_RESTORE, _frame_size);
  __ addi(R1_SP, R1_SP, _frame_size);
  __ restore_LR_CR(R0);
}
// Walk the stub's live-register mask and, depending on 'action', either
// just count the save slots needed (ACTION_COUNT_ONLY), spill the registers
// into the frame below 'offset' (ACTION_SAVE), or reload them
// (ACTION_RESTORE). Returns the number of BytesPerWord-sized slots used.
int SaveLiveRegisters::iterate_over_register_mask(IterationAction action, int offset) {
  int reg_save_index = 0;
  RegMaskIterator live_regs_iterator(_reg_mask);

  while(live_regs_iterator.has_next()) {
    const OptoReg::Name opto_reg = live_regs_iterator.next();

    // Filter out stack slots (spilled registers, i.e., stack-allocated registers).
    if (!OptoReg::is_reg(opto_reg)) {
      continue;
    }

    const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
    if (vm_reg->is_Register()) {
      Register std_reg = vm_reg->as_Register();

      // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
      if (std_reg == _result_reg) {
        continue;
      }

      // Only GPRs in the R2..R12 encoding range are spilled here.
      if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
        reg_save_index++;

        if (action == ACTION_SAVE) {
          _masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
        } else if (action == ACTION_RESTORE) {
          _masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
        } else {
          assert(action == ACTION_COUNT_ONLY, "Sanity");
        }
      }
    } else if (vm_reg->is_FloatRegister()) {
      FloatRegister fp_reg = vm_reg->as_FloatRegister();

      // Only FPRs in the F0..F13 encoding range are spilled here.
      if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
        reg_save_index++;

        if (action == ACTION_SAVE) {
          _masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
        } else if (action == ACTION_RESTORE) {
          _masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
        } else {
          assert(action == ACTION_COUNT_ONLY, "Sanity");
        }
      }
    } else if (vm_reg->is_ConditionRegister()) {
      // NOP. Conditions registers are covered by save_LR_CR
    } else if (vm_reg->is_VectorSRegister()) {
      assert(SuperwordUseVSX, "or should not reach here");
      VectorSRegister vs_reg = vm_reg->as_VectorSRegister();

      // Vector-scalar registers in VSR32..VSR51 take two 8-byte slots each.
      if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
        reg_save_index += 2;

        // R0 is free here (LR/CR already saved via save_LR_CR(R0)).
        Register spill_addr = R0;
        if (action == ACTION_SAVE) {
          _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
          _masm->stxvd2x(vs_reg, spill_addr);
        } else if (action == ACTION_RESTORE) {
          _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
          _masm->lxvd2x(vs_reg, spill_addr);
        } else {
          assert(action == ACTION_COUNT_ONLY, "Sanity");
        }
      }
    } else {
      if (vm_reg->is_SpecialRegister()) {
        fatal("Special registers are unsupported. Found register %s", vm_reg->name());
      } else {
        fatal("Register type is not known");
      }
    }
  }

  return reg_save_index;
}
#endif // COMPILER2

View File

@ -29,6 +29,14 @@
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "code/vmreg.hpp"
#include "opto/optoreg.hpp"
#include "opto/regmask.hpp"
class BarrierStubC2;
class Node;
#endif // COMPILER2
enum class NMethodPatchingType {
stw_instruction_and_data_patch,
@ -71,6 +79,43 @@ public:
virtual void c2i_entry_barrier(MacroAssembler* masm, Register tmp1, Register tmp2, Register tmp3);
virtual void check_oop(MacroAssembler *masm, Register oop, const char* msg);
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
#endif // COMPILER2
};
#ifdef COMPILER2
// This class saves and restores the registers that need to be preserved across
// the runtime call represented by a given C2 barrier stub. Use as follows:
// {
// SaveLiveRegisters save(masm, stub);
// ..
// __ call_VM_leaf(...);
// ..
// }
class SaveLiveRegisters {
  MacroAssembler* _masm;
  RegMask _reg_mask;    // registers live across the stub's runtime call
  Register _result_reg; // stub result register; deliberately not preserved
  int _frame_size;      // size in bytes of the pushed spill frame

public:
  SaveLiveRegisters(MacroAssembler *masm, BarrierStubC2 *stub);
  ~SaveLiveRegisters();

private:
  // What a pass over the live-register mask should do.
  enum IterationAction : int {
    ACTION_SAVE,
    ACTION_RESTORE,
    ACTION_COUNT_ONLY
  };

  // Count, save, or restore the live registers; returns the slot count used.
  int iterate_over_register_mask(IterationAction action, int offset = 0);
};
#endif // COMPILER2
#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP

View File

@ -838,132 +838,6 @@ void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler*
#ifdef COMPILER2
OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if ((vm_reg->is_Register() || vm_reg ->is_FloatRegister()) && (opto_reg & 1) != 0) {
return OptoReg::Bad;
}
return opto_reg;
}
#define __ _masm->
class ZSaveLiveRegisters {
MacroAssembler* _masm;
RegMask _reg_mask;
Register _result_reg;
int _frame_size;
public:
ZSaveLiveRegisters(MacroAssembler *masm, ZBarrierStubC2 *stub)
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) {
const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
_frame_size = align_up(register_save_size, frame::alignment_in_bytes)
+ frame::native_abi_reg_args_size;
__ save_LR_CR(R0);
__ push_frame(_frame_size, R0);
iterate_over_register_mask(ACTION_SAVE, _frame_size);
}
~ZSaveLiveRegisters() {
iterate_over_register_mask(ACTION_RESTORE, _frame_size);
__ addi(R1_SP, R1_SP, _frame_size);
__ restore_LR_CR(R0);
}
private:
enum IterationAction : int {
ACTION_SAVE,
ACTION_RESTORE,
ACTION_COUNT_ONLY
};
int iterate_over_register_mask(IterationAction action, int offset = 0) {
int reg_save_index = 0;
RegMaskIterator live_regs_iterator(_reg_mask);
while(live_regs_iterator.has_next()) {
const OptoReg::Name opto_reg = live_regs_iterator.next();
// Filter out stack slots (spilled registers, i.e., stack-allocated registers).
if (!OptoReg::is_reg(opto_reg)) {
continue;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
Register std_reg = vm_reg->as_Register();
// '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
if (std_reg == _result_reg) {
continue;
}
if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
reg_save_index++;
if (action == ACTION_SAVE) {
_masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else if (action == ACTION_RESTORE) {
_masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else if (vm_reg->is_FloatRegister()) {
FloatRegister fp_reg = vm_reg->as_FloatRegister();
if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
reg_save_index++;
if (action == ACTION_SAVE) {
_masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else if (action == ACTION_RESTORE) {
_masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else if (vm_reg->is_ConditionRegister()) {
// NOP. Conditions registers are covered by save_LR_CR
} else if (vm_reg->is_VectorSRegister()) {
assert(SuperwordUseVSX, "or should not reach here");
VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
reg_save_index += 2;
Register spill_addr = R0;
if (action == ACTION_SAVE) {
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
_masm->stxvd2x(vs_reg, spill_addr);
} else if (action == ACTION_RESTORE) {
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
_masm->lxvd2x(vs_reg, spill_addr);
} else {
assert(action == ACTION_COUNT_ONLY, "Sanity");
}
}
} else {
if (vm_reg->is_SpecialRegister()) {
fatal("Special registers are unsupported. Found register %s", vm_reg->name());
} else {
fatal("Register type is not known");
}
}
}
return reg_save_index;
}
};
#undef __
#define __ _masm->
@ -1024,7 +898,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
assert_different_registers(ref, ref_addr.base());
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
ZSetupArguments setup_arguments(masm, stub);
__ call_VM_leaf(stub->slow_path());
@ -1063,7 +937,7 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ bind(slow);
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
__ add(R3_ARG1, ind_or_offs, rbase);
if (stub->is_native()) {
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr(), R3_ARG1);

View File

@ -105,8 +105,6 @@ public:
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const;

View File

@ -35,6 +35,9 @@
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2
#define __ masm->
@ -372,3 +375,72 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register
__ load_klass(obj, obj, tmp1); // get klass
__ beqz(obj, error); // if klass is null it is broken
}
#ifdef COMPILER2
// Refine the OptoReg encoding of a register live across a barrier stub.
// Stack slots are reported as OptoReg::Bad; floating-point registers are
// canonicalized to their even base slot by clearing the low bit.
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }
  if (OptoReg::as_VMReg(opto_reg)->is_FloatRegister()) {
    return opto_reg & ~1;
  }
  return opto_reg;
}
#undef __
#define __ _masm->
// Collect the registers live across the stub's runtime call into the
// per-class sets (_gp_regs, _fp_regs, _vp_regs), then drop the registers
// that do not need saving (C-ABI saved-on-entry registers, temporaries and
// the stub's result register).
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
  // Record registers that needs to be saved/restored
  RegMaskIterator rmi(stub->live());
  while (rmi.has_next()) {
    const OptoReg::Name opto_reg = rmi.next();
    if (OptoReg::is_reg(opto_reg)) {
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
      if (vm_reg->is_Register()) {
        _gp_regs += RegSet::of(vm_reg->as_Register());
      } else if (vm_reg->is_FloatRegister()) {
        _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else if (vm_reg->is_VectorRegister()) {
        // Align the OptoReg down to the vector register's base slot before
        // adding it to the set.
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1));
        _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
      } else {
        fatal("Unknown register type");
      }
    }
  }

  // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
  if (stub->result() != noreg) {
    _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->result());
  } else {
    _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2, x5) + RegSet::of(x8, x9);
  }
}
// Push all registers that must survive the stub's runtime call. The
// destructor pops them in the exact reverse order.
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
  : _masm(masm),
    _gp_regs(),
    _fp_regs(),
    _vp_regs() {
  // Figure out what registers to save/restore
  initialize(stub);

  // Save registers
  __ push_reg(_gp_regs, sp);
  __ push_fp(_fp_regs, sp);
  __ push_v(_vp_regs, sp);
}
// Restore the registers saved by the constructor, popping in reverse order
// of the pushes.
SaveLiveRegisters::~SaveLiveRegisters() {
  // Restore registers
  __ pop_v(_vp_regs, sp);
  __ pop_fp(_fp_regs, sp);
  __ pop_reg(_gp_regs, sp);
}
#endif // COMPILER2

View File

@ -31,6 +31,12 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
class BarrierStubC2;
class Node;
#endif // COMPILER2
enum class NMethodPatchingType {
stw_instruction_and_data_patch,
@ -106,6 +112,36 @@ public:
static address patching_epoch_addr();
static void clear_patching_epoch();
static void increment_patching_epoch();
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
#endif // COMPILER2
};
#ifdef COMPILER2
// This class saves and restores the registers that need to be preserved across
// the runtime call represented by a given C2 barrier stub. Use as follows:
// {
// SaveLiveRegisters save(masm, stub);
// ..
// __ jalr(...);
// ..
// }
class SaveLiveRegisters {
private:
  MacroAssembler* const _masm;
  RegSet _gp_regs;       // general-purpose registers to preserve
  FloatRegSet _fp_regs;  // floating-point registers to preserve
  VectorRegSet _vp_regs; // vector registers to preserve

public:
  // Populate the register sets from the stub's live-register mask.
  void initialize(BarrierStubC2* stub);
  SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub);
  ~SaveLiveRegisters();
};
#endif // COMPILER2
#endif // CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP

View File

@ -642,80 +642,9 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
#ifdef COMPILER2
OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_FloatRegister()) {
return opto_reg & ~1;
}
return opto_reg;
}
#undef __
#define __ _masm->
class ZSaveLiveRegisters {
private:
MacroAssembler* const _masm;
RegSet _gp_regs;
FloatRegSet _fp_regs;
VectorRegSet _vp_regs;
public:
void initialize(ZBarrierStubC2* stub) {
// Record registers that needs to be saved/restored
RegMaskIterator rmi(stub->live());
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
if (OptoReg::is_reg(opto_reg)) {
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
_gp_regs += RegSet::of(vm_reg->as_Register());
} else if (vm_reg->is_FloatRegister()) {
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
} else if (vm_reg->is_VectorRegister()) {
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1));
_vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
} else {
fatal("Unknown register type");
}
}
}
// Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
if (stub->result() != noreg) {
_gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->result());
} else {
_gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2, x5) + RegSet::of(x8, x9);
}
}
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_regs(),
_fp_regs(),
_vp_regs() {
// Figure out what registers to save/restore
initialize(stub);
// Save registers
__ push_reg(_gp_regs, sp);
__ push_fp(_fp_regs, sp);
__ push_v(_vp_regs, sp);
}
~ZSaveLiveRegisters() {
// Restore registers
__ pop_v(_vp_regs, sp);
__ pop_fp(_fp_regs, sp);
__ pop_reg(_gp_regs, sp);
}
};
class ZSetupArguments {
private:
MacroAssembler* const _masm;
@ -781,7 +710,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
}
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
ZSetupArguments setup_arguments(masm, stub);
__ mv(t0, stub->slow_path());
__ jalr(t0);
@ -813,7 +742,7 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ bind(slow);
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
__ la(c_rarg0, stub->ref_addr());
if (stub->is_native()) {

View File

@ -166,9 +166,6 @@ public:
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,

View File

@ -147,3 +147,11 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
// Fall through to method body.
__ block_comment("} nmethod_entry_barrier (nmethod_entry_barrier)");
}
#ifdef COMPILER2
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
Unimplemented(); // This must be implemented to support late barrier expansion.
}
#endif // COMPILER2

View File

@ -29,6 +29,12 @@
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "code/vmreg.hpp"
#include "opto/optoreg.hpp"
class Node;
#endif // COMPILER2
class InterpreterMacroAssembler;
@ -52,6 +58,11 @@ public:
virtual void nmethod_entry_barrier(MacroAssembler* masm);
virtual void barrier_stubs_init() {}
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
#endif // COMPILER2
};
#endif // CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP

View File

@ -34,6 +34,9 @@
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2
#define __ masm->
@ -488,3 +491,280 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register
__ testptr(obj, obj);
__ jcc(Assembler::zero, error); // if klass is null it is broken
}
#ifdef COMPILER2
#ifdef _LP64
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_XMMRegister()) {
opto_reg &= ~15;
switch (node->ideal_reg()) {
case Op_VecX:
opto_reg |= 2;
break;
case Op_VecY:
opto_reg |= 4;
break;
case Op_VecZ:
opto_reg |= 8;
break;
default:
opto_reg |= 1;
break;
}
}
return opto_reg;
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
#define __ _masm->
int SaveLiveRegisters::xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
if (left->_size == right->_size) {
return 0;
}
return (left->_size < right->_size) ? -1 : 1;
}
int SaveLiveRegisters::xmm_slot_size(OptoReg::Name opto_reg) {
// The low order 4 bytes denote what size of the XMM register is live
return (opto_reg & 15) << 3;
}
uint SaveLiveRegisters::xmm_ideal_reg_for_size(int reg_size) {
switch (reg_size) {
case 8:
return Op_VecD;
case 16:
return Op_VecX;
case 32:
return Op_VecY;
case 64:
return Op_VecZ;
default:
fatal("Invalid register size %d", reg_size);
return 0;
}
}
bool SaveLiveRegisters::xmm_needs_vzeroupper() const {
return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
}
void SaveLiveRegisters::xmm_register_save(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void SaveLiveRegisters::xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}
void SaveLiveRegisters::gp_register_save(Register reg) {
_spill_offset -= 8;
__ movq(Address(rsp, _spill_offset), reg);
}
void SaveLiveRegisters::opmask_register_save(KRegister reg) {
_spill_offset -= 8;
__ kmov(Address(rsp, _spill_offset), reg);
}
void SaveLiveRegisters::gp_register_restore(Register reg) {
__ movq(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void SaveLiveRegisters::opmask_register_restore(KRegister reg) {
__ kmov(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
RegMask caller_saved;
caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
if (stub->result() != noreg) {
caller_saved.Remove(OptoReg::as_OptoReg(stub->result()->as_VMReg()));
}
// Create mask of live registers
RegMask live = stub->live();
int gp_spill_size = 0;
int opmask_spill_size = 0;
int xmm_spill_size = 0;
// Record registers that needs to be saved/restored
RegMaskIterator rmi(live);
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
if (caller_saved.Member(opto_reg)) {
_gp_registers.append(vm_reg->as_Register());
gp_spill_size += 8;
}
} else if (vm_reg->is_KRegister()) {
// All opmask registers are caller saved, thus spill the ones
// which are live.
if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
_opmask_registers.append(vm_reg->as_KRegister());
opmask_spill_size += 8;
}
} else if (vm_reg->is_XMMRegister()) {
// We encode in the low order 4 bits of the opto_reg, how large part of the register is live
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
const int reg_size = xmm_slot_size(opto_reg);
const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
const int reg_index = _xmm_registers.find(reg_data);
if (reg_index == -1) {
// Not previously appended
_xmm_registers.append(reg_data);
xmm_spill_size += reg_size;
} else {
// Previously appended, update size
const int reg_size_prev = _xmm_registers.at(reg_index)._size;
if (reg_size > reg_size_prev) {
_xmm_registers.at_put(reg_index, reg_data);
xmm_spill_size += reg_size - reg_size_prev;
}
}
} else {
fatal("Unexpected register type");
}
}
// Sort by size, largest first
_xmm_registers.sort(xmm_compare_register_size);
// On Windows, the caller reserves stack space for spilling register arguments
const int arg_spill_size = frame::arg_reg_save_area_bytes;
// Stack pointer must be 16 bytes aligned for the call
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
}
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
: _masm(masm),
_gp_registers(),
_opmask_registers(),
_xmm_registers(),
_spill_size(0),
_spill_offset(0) {
//
// Stack layout after registers have been spilled:
//
// | ... | original rsp, 16 bytes aligned
// ------------------
// | zmm0 high |
// | ... |
// | zmm0 low | 16 bytes aligned
// | ... |
// | ymm1 high |
// | ... |
// | ymm1 low | 16 bytes aligned
// | ... |
// | xmmN high |
// | ... |
// | xmmN low | 8 bytes aligned
// | reg0 | 8 bytes aligned
// | reg1 |
// | ... |
// | regN | new rsp, if 16 bytes aligned
// | <padding> | else new rsp, 16 bytes aligned
// ------------------
//
// Figure out what registers to save/restore
initialize(stub);
// Allocate stack space
if (_spill_size > 0) {
__ subptr(rsp, _spill_size);
}
// Save XMM/YMM/ZMM registers
for (int i = 0; i < _xmm_registers.length(); i++) {
xmm_register_save(_xmm_registers.at(i));
}
if (xmm_needs_vzeroupper()) {
__ vzeroupper();
}
// Save general purpose registers
for (int i = 0; i < _gp_registers.length(); i++) {
gp_register_save(_gp_registers.at(i));
}
// Save opmask registers
for (int i = 0; i < _opmask_registers.length(); i++) {
opmask_register_save(_opmask_registers.at(i));
}
}
SaveLiveRegisters::~SaveLiveRegisters() {
// Restore opmask registers
for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
opmask_register_restore(_opmask_registers.at(i));
}
// Restore general purpose registers
for (int i = _gp_registers.length() - 1; i >= 0; i--) {
gp_register_restore(_gp_registers.at(i));
}
__ vzeroupper();
// Restore XMM/YMM/ZMM registers
for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
xmm_register_restore(_xmm_registers.at(i));
}
// Free stack space
if (_spill_size > 0) {
__ addptr(rsp, _spill_size);
}
}
#else // !_LP64
OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
Unimplemented(); // This must be implemented to support late barrier expansion.
}
#endif // _LP64
#endif // COMPILER2

View File

@ -28,7 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
class BarrierStubC2;
class Node;
#endif // COMPILER2
class InterpreterMacroAssembler;
class BarrierSetAssembler: public CHeapObj<mtGC> {
@ -106,6 +111,63 @@ public:
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
#endif // COMPILER2
};
#ifdef COMPILER2
#ifdef _LP64
// This class saves and restores the registers that need to be preserved across
// the runtime call represented by a given C2 barrier stub. Use as follows:
// {
// SaveLiveRegisters save(masm, stub);
// ..
// __ call(RuntimeAddress(...);
// ..
// }
class SaveLiveRegisters {
private:
struct XMMRegisterData {
XMMRegister _reg;
int _size;
// Used by GrowableArray::find()
bool operator == (const XMMRegisterData& other) {
return _reg == other._reg;
}
};
MacroAssembler* const _masm;
GrowableArray<Register> _gp_registers;
GrowableArray<KRegister> _opmask_registers;
GrowableArray<XMMRegisterData> _xmm_registers;
int _spill_size;
int _spill_offset;
static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right);
static int xmm_slot_size(OptoReg::Name opto_reg);
static uint xmm_ideal_reg_for_size(int reg_size);
bool xmm_needs_vzeroupper() const;
void xmm_register_save(const XMMRegisterData& reg_data);
void xmm_register_restore(const XMMRegisterData& reg_data);
void gp_register_save(Register reg);
void opmask_register_save(KRegister reg);
void gp_register_restore(Register reg);
void opmask_register_restore(KRegister reg);
void initialize(BarrierStubC2* stub);
public:
SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub);
~SaveLiveRegisters();
};
#endif // _LP64
#endif // COMPILER2
#endif // CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP

View File

@ -1156,290 +1156,9 @@ void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler*
#ifdef COMPILER2
OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
if (!OptoReg::is_reg(opto_reg)) {
return OptoReg::Bad;
}
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_XMMRegister()) {
opto_reg &= ~15;
switch (node->ideal_reg()) {
case Op_VecX:
opto_reg |= 2;
break;
case Op_VecY:
opto_reg |= 4;
break;
case Op_VecZ:
opto_reg |= 8;
break;
default:
opto_reg |= 1;
break;
}
}
return opto_reg;
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
#define __ _masm->
class ZSaveLiveRegisters {
private:
struct XMMRegisterData {
XMMRegister _reg;
int _size;
// Used by GrowableArray::find()
bool operator == (const XMMRegisterData& other) {
return _reg == other._reg;
}
};
MacroAssembler* const _masm;
GrowableArray<Register> _gp_registers;
GrowableArray<KRegister> _opmask_registers;
GrowableArray<XMMRegisterData> _xmm_registers;
int _spill_size;
int _spill_offset;
static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
if (left->_size == right->_size) {
return 0;
}
return (left->_size < right->_size) ? -1 : 1;
}
static int xmm_slot_size(OptoReg::Name opto_reg) {
// The low order 4 bytes denote what size of the XMM register is live
return (opto_reg & 15) << 3;
}
static uint xmm_ideal_reg_for_size(int reg_size) {
switch (reg_size) {
case 8:
return Op_VecD;
case 16:
return Op_VecX;
case 32:
return Op_VecY;
case 64:
return Op_VecZ;
default:
fatal("Invalid register size %d", reg_size);
return 0;
}
}
bool xmm_needs_vzeroupper() const {
return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
}
void xmm_register_save(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}
void gp_register_save(Register reg) {
_spill_offset -= 8;
__ movq(Address(rsp, _spill_offset), reg);
}
void opmask_register_save(KRegister reg) {
_spill_offset -= 8;
__ kmov(Address(rsp, _spill_offset), reg);
}
void gp_register_restore(Register reg) {
__ movq(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void opmask_register_restore(KRegister reg) {
__ kmov(reg, Address(rsp, _spill_offset));
_spill_offset += 8;
}
void initialize(ZBarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
RegMask caller_saved;
caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
if (stub->result() != noreg) {
caller_saved.Remove(OptoReg::as_OptoReg(stub->result()->as_VMReg()));
}
// Create mask of live registers
RegMask live = stub->live();
int gp_spill_size = 0;
int opmask_spill_size = 0;
int xmm_spill_size = 0;
// Record registers that needs to be saved/restored
RegMaskIterator rmi(live);
while (rmi.has_next()) {
const OptoReg::Name opto_reg = rmi.next();
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
if (caller_saved.Member(opto_reg)) {
_gp_registers.append(vm_reg->as_Register());
gp_spill_size += 8;
}
} else if (vm_reg->is_KRegister()) {
// All opmask registers are caller saved, thus spill the ones
// which are live.
if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
_opmask_registers.append(vm_reg->as_KRegister());
opmask_spill_size += 8;
}
} else if (vm_reg->is_XMMRegister()) {
// We encode in the low order 4 bits of the opto_reg, how large part of the register is live
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
const int reg_size = xmm_slot_size(opto_reg);
const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
const int reg_index = _xmm_registers.find(reg_data);
if (reg_index == -1) {
// Not previously appended
_xmm_registers.append(reg_data);
xmm_spill_size += reg_size;
} else {
// Previously appended, update size
const int reg_size_prev = _xmm_registers.at(reg_index)._size;
if (reg_size > reg_size_prev) {
_xmm_registers.at_put(reg_index, reg_data);
xmm_spill_size += reg_size - reg_size_prev;
}
}
} else {
fatal("Unexpected register type");
}
}
// Sort by size, largest first
_xmm_registers.sort(xmm_compare_register_size);
// On Windows, the caller reserves stack space for spilling register arguments
const int arg_spill_size = frame::arg_reg_save_area_bytes;
// Stack pointer must be 16 bytes aligned for the call
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
}
public:
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_registers(),
_opmask_registers(),
_xmm_registers(),
_spill_size(0),
_spill_offset(0) {
//
// Stack layout after registers have been spilled:
//
// | ... | original rsp, 16 bytes aligned
// ------------------
// | zmm0 high |
// | ... |
// | zmm0 low | 16 bytes aligned
// | ... |
// | ymm1 high |
// | ... |
// | ymm1 low | 16 bytes aligned
// | ... |
// | xmmN high |
// | ... |
// | xmmN low | 8 bytes aligned
// | reg0 | 8 bytes aligned
// | reg1 |
// | ... |
// | regN | new rsp, if 16 bytes aligned
// | <padding> | else new rsp, 16 bytes aligned
// ------------------
//
// Figure out what registers to save/restore
initialize(stub);
// Allocate stack space
if (_spill_size > 0) {
__ subptr(rsp, _spill_size);
}
// Save XMM/YMM/ZMM registers
for (int i = 0; i < _xmm_registers.length(); i++) {
xmm_register_save(_xmm_registers.at(i));
}
if (xmm_needs_vzeroupper()) {
__ vzeroupper();
}
// Save general purpose registers
for (int i = 0; i < _gp_registers.length(); i++) {
gp_register_save(_gp_registers.at(i));
}
// Save opmask registers
for (int i = 0; i < _opmask_registers.length(); i++) {
opmask_register_save(_opmask_registers.at(i));
}
}
~ZSaveLiveRegisters() {
// Restore opmask registers
for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
opmask_register_restore(_opmask_registers.at(i));
}
// Restore general purpose registers
for (int i = _gp_registers.length() - 1; i >= 0; i--) {
gp_register_restore(_gp_registers.at(i));
}
__ vzeroupper();
// Restore XMM/YMM/ZMM registers
for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
xmm_register_restore(_xmm_registers.at(i));
}
// Free stack space
if (_spill_size > 0) {
__ addptr(rsp, _spill_size);
}
}
};
class ZSetupArguments {
private:
MacroAssembler* const _masm;
@ -1504,7 +1223,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
__ movptr(stub->ref(), stub->ref_addr());
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
ZSetupArguments setup_arguments(masm, stub);
__ call(RuntimeAddress(stub->slow_path()));
}
@ -1534,7 +1253,7 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ bind(slow);
{
ZSaveLiveRegisters save_live_registers(masm, stub);
SaveLiveRegisters save_live_registers(masm, stub);
__ lea(c_rarg0, stub->ref_addr());
if (stub->is_native()) {

View File

@ -164,9 +164,6 @@ public:
#endif // COMPILER1
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,

View File

@ -23,16 +23,22 @@
*/
#include "precompiled.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/barrierSetAssembler)
// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }
@ -77,6 +83,31 @@ bool C2Access::needs_cpu_membar() const {
return false;
}
static BarrierSetC2State* barrier_set_state() {
return reinterpret_cast<BarrierSetC2State*>(Compile::current()->barrier_set_state());
}
BarrierStubC2::BarrierStubC2(const MachNode* node)
: _node(node),
_entry(),
_continuation() {}
RegMask& BarrierStubC2::live() const {
return *barrier_set_state()->live(_node);
}
Label* BarrierStubC2::entry() {
// The _entry will never be bound when in_scratch_emit_size() is true.
// However, we still need to return a label that is not bound now, but
// will eventually be bound. Any eventually bound label will do, as it
// will only act as a placeholder, so we return the _continuation label.
return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
}
Label* BarrierStubC2::continuation() {
return &_continuation;
}
Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
DecoratorSet decorators = access.decorators();
@ -788,3 +819,76 @@ void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac
}
#undef XTOP
void BarrierSetC2::compute_liveness_at_stubs() const {
ResourceMark rm;
Compile* const C = Compile::current();
Arena* const A = Thread::current()->resource_area();
PhaseCFG* const cfg = C->cfg();
PhaseRegAlloc* const regalloc = C->regalloc();
RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
BarrierSetAssembler* const bs = BarrierSet::barrier_set()->barrier_set_assembler();
Block_List worklist;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
new ((void*)(live + i)) RegMask();
worklist.push(cfg->get_block(i));
}
while (worklist.size() > 0) {
const Block* const block = worklist.pop();
RegMask& old_live = live[block->_pre_order];
RegMask new_live;
// Initialize to union of successors
for (uint i = 0; i < block->_num_succs; i++) {
const uint succ_id = block->_succs[i]->_pre_order;
new_live.OR(live[succ_id]);
}
// Walk block backwards, computing liveness
for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
const Node* const node = block->get_node(i);
// Remove def bits
const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
if (first != OptoReg::Bad) {
new_live.Remove(first);
}
if (second != OptoReg::Bad) {
new_live.Remove(second);
}
// Add use bits
for (uint j = 1; j < node->req(); ++j) {
const Node* const use = node->in(j);
const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
if (first != OptoReg::Bad) {
new_live.Insert(first);
}
if (second != OptoReg::Bad) {
new_live.Insert(second);
}
}
// If this node tracks liveness, update it
RegMask* const regs = barrier_set_state()->live(node);
if (regs != NULL) {
regs->OR(new_live);
}
}
// Now at block top, see if we have any changes
new_live.SUBTRACT(old_live);
if (new_live.is_NotEmpty()) {
// Liveness has refined, update and propagate to prior blocks
old_live.OR(new_live);
for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
}
}
}
}

View File

@ -205,6 +205,48 @@ public:
virtual bool is_opt_access() const { return true; }
};
class BarrierSetC2State : public ArenaObj {
protected:
Node_Array _live;
public:
BarrierSetC2State(Arena* arena) : _live(arena) {}
RegMask* live(const Node* node) {
if (!node->is_Mach() || !needs_liveness_data(node->as_Mach())) {
// Don't need liveness for non-MachNodes or if the GC doesn't request it
return nullptr;
}
RegMask* live = (RegMask*)_live[node->_idx];
if (live == nullptr) {
live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
_live.map(node->_idx, (Node*)live);
}
return live;
}
virtual bool needs_liveness_data(const MachNode* mach) const = 0;
};
// This class represents the slow path in a C2 barrier. It is defined by a
// memory access, an entry point, and a continuation point (typically the end of
// the barrier). It provides a set of registers whose value is live across the
// barrier, and hence must be preserved across runtime calls from the stub.
class BarrierStubC2 : public ArenaObj {
protected:
const MachNode* _node;
Label _entry;
Label _continuation;
public:
BarrierStubC2(const MachNode* node);
RegMask& live() const;
Label* entry();
Label* continuation();
virtual Register result() const = 0;
};
// This is the top-level class for the backend of the Access API in C2.
// The top-level class is responsible for performing raw accesses. The
@ -302,6 +344,7 @@ public:
virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; }
virtual void late_barrier_analysis() const { }
virtual void compute_liveness_at_stubs() const;
virtual int estimate_stub_size() const { return 0; }
virtual void emit_stubs(CodeBuffer& cb) const { }

View File

@ -120,17 +120,16 @@ public:
typedef ZArenaHashtable<intptr_t, bool, 4> ZOffsetTable;
class ZBarrierSetC2State : public ArenaObj {
class ZBarrierSetC2State : public BarrierSetC2State {
private:
GrowableArray<ZBarrierStubC2*>* _stubs;
Node_Array _live;
int _trampoline_stubs_count;
int _stubs_start_offset;
public:
ZBarrierSetC2State(Arena* arena)
: _stubs(new (arena) GrowableArray<ZBarrierStubC2*>(arena, 8, 0, nullptr)),
_live(arena),
: BarrierSetC2State(arena),
_stubs(new (arena) GrowableArray<ZBarrierStubC2*>(arena, 8, 0, nullptr)),
_trampoline_stubs_count(0),
_stubs_start_offset(0) {}
@ -138,25 +137,9 @@ public:
return _stubs;
}
RegMask* live(const Node* node) {
if (!node->is_Mach()) {
// Don't need liveness for non-MachNodes
return nullptr;
}
const MachNode* const mach = node->as_Mach();
if (mach->barrier_data() == ZBarrierElided) {
// Don't need liveness data for nodes without barriers
return nullptr;
}
RegMask* live = (RegMask*)_live[node->_idx];
if (live == nullptr) {
live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
_live.map(node->_idx, (Node*)live);
}
return live;
bool needs_liveness_data(const MachNode* mach) const {
// Don't need liveness data for nodes without barriers
return mach->barrier_data() != ZBarrierElided;
}
void inc_trampoline_stubs_count() {
@ -201,30 +184,7 @@ int ZBarrierStubC2::stubs_start_offset() {
return barrier_set_state()->stubs_start_offset();
}
ZBarrierStubC2::ZBarrierStubC2(const MachNode* node)
: _node(node),
_entry(),
_continuation() {}
Register ZBarrierStubC2::result() const {
return noreg;
}
RegMask& ZBarrierStubC2::live() const {
return *barrier_set_state()->live(_node);
}
Label* ZBarrierStubC2::entry() {
// The _entry will never be bound when in_scratch_emit_size() is true.
// However, we still need to return a label that is not bound now, but
// will eventually be bound. Any eventually bound label will do, as it
// will only act as a placeholder, so we return the _continuation label.
return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
}
Label* ZBarrierStubC2::continuation() {
return &_continuation;
}
ZBarrierStubC2::ZBarrierStubC2(const MachNode* node) : BarrierStubC2(node) {}
ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref) {
AARCH64_ONLY(fatal("Should use ZLoadBarrierStubC2Aarch64::create"));
@ -884,81 +844,6 @@ void ZBarrierSetC2::analyze_dominating_barriers() const {
analyze_dominating_barriers_impl(atomics, atomic_dominators);
}
// == Reduced spilling optimization ==
void ZBarrierSetC2::compute_liveness_at_stubs() const {
ResourceMark rm;
Compile* const C = Compile::current();
Arena* const A = Thread::current()->resource_area();
PhaseCFG* const cfg = C->cfg();
PhaseRegAlloc* const regalloc = C->regalloc();
RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
ZBarrierSetAssembler* const bs = ZBarrierSet::assembler();
Block_List worklist;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
new ((void*)(live + i)) RegMask();
worklist.push(cfg->get_block(i));
}
while (worklist.size() > 0) {
const Block* const block = worklist.pop();
RegMask& old_live = live[block->_pre_order];
RegMask new_live;
// Initialize to union of successors
for (uint i = 0; i < block->_num_succs; i++) {
const uint succ_id = block->_succs[i]->_pre_order;
new_live.OR(live[succ_id]);
}
// Walk block backwards, computing liveness
for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
const Node* const node = block->get_node(i);
// Remove def bits
const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
if (first != OptoReg::Bad) {
new_live.Remove(first);
}
if (second != OptoReg::Bad) {
new_live.Remove(second);
}
// Add use bits
for (uint j = 1; j < node->req(); ++j) {
const Node* const use = node->in(j);
const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
if (first != OptoReg::Bad) {
new_live.Insert(first);
}
if (second != OptoReg::Bad) {
new_live.Insert(second);
}
}
// If this node tracks liveness, update it
RegMask* const regs = barrier_set_state()->live(node);
if (regs != nullptr) {
regs->OR(new_live);
}
}
// Now at block top, see if we have any changes
new_live.SUBTRACT(old_live);
if (new_live.is_NotEmpty()) {
// Liveness has refined, update and propagate to prior blocks
old_live.OR(new_live);
for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
}
}
}
}
void ZBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
eliminate_gc_barrier_data(node);
}

View File

@ -41,12 +41,8 @@ class MachNode;
class MacroAssembler;
class ZBarrierStubC2 : public ArenaObj {
class ZBarrierStubC2 : public BarrierStubC2 {
protected:
const MachNode* _node;
Label _entry;
Label _continuation;
static void register_stub(ZBarrierStubC2* stub);
static void inc_trampoline_stubs_count();
static int trampoline_stubs_count();
@ -55,10 +51,6 @@ static int stubs_start_offset();
ZBarrierStubC2(const MachNode* node);
public:
RegMask& live() const;
Label* entry();
Label* continuation();
virtual Register result() const = 0;
virtual void emit_code(MacroAssembler& masm) = 0;
};
@ -108,7 +100,6 @@ public:
class ZBarrierSetC2 : public BarrierSetC2 {
private:
void compute_liveness_at_stubs() const;
void analyze_dominating_barriers_impl(Node_List& accesses, Node_List& access_dominators) const;
void analyze_dominating_barriers() const;