Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-15 12:55:07 +00:00)
8369569: Rename methods in regmask.hpp to conform with HotSpot coding style
Reviewed-by: aseoane, rcastanedalo, epeter
parent: 2148dbbe75
commit: 39211e7fac
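The change is a mechanical rename of the RegMask/LRG mask API to HotSpot naming conventions. A minimal sketch of the new spellings follows, using mask and register names taken from the hunks below purely for illustration; it is not a line from the patch itself, and the surrounding reg_mask_init-style context is assumed.

  // Renamings applied by this patch:
  //   Insert -> insert, Remove -> remove, Member -> member, Size -> size,
  //   Clear -> clear, Set_All -> set_all, AND -> and_with, OR -> or_with,
  //   SUBTRACT -> subtract, is_Empty -> is_empty,
  //   RegMask::Empty -> RegMask::EMPTY, RegMask::All -> RegMask::ALL
  RegMask scratch = _ALL_REG_mask;                         // assumed: an adlc-generated mask
  scratch.subtract(_NON_ALLOCATABLE_REG_mask);             // was SUBTRACT()
  scratch.remove(OptoReg::as_OptoReg(r29->as_VMReg()));    // was Remove()
  if (!scratch.is_empty() &&
      scratch.member(OptoReg::as_OptoReg(r27->as_VMReg()))) {  // was is_Empty()/Member()
    uint allocatable = scratch.size();                     // was Size()
  }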
@@ -1267,38 +1267,38 @@ source %{
// registers conditionally reserved.

_ANY_REG32_mask = _ALL_REG32_mask;
-_ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));
+_ANY_REG32_mask.remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

_ANY_REG_mask = _ALL_REG_mask;

_PTR_REG_mask = _ALL_REG_mask;

_NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
-_NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
+_NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);

_NO_SPECIAL_REG_mask = _ALL_REG_mask;
-_NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
+_NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

_NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
-_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
+_NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

// r27 is not allocatable when compressed oops is on and heapbase is not
// zero, compressed klass pointers doesn't use r27 after JDK-8234794
if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
-_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
-_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
-_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+_NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+_NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+_NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
}

// r29 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
-_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
-_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
-_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+_NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+_NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+_NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}

_NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
-_NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+_NO_SPECIAL_NO_RFP_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}

// Optimizaton of volatile gets and puts
@@ -1734,7 +1734,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}

//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;

int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -2520,10 +2520,10 @@ uint Matcher::int_pressure_limit()
// as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
// derived pointers and lastly fail to spill after reaching maximum
// number of iterations. Lowering the default pressure threshold to
-// (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
+// (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
// a high register pressure area of the code so that split_DEF can
// generate DefinitionSpillCopy for the derived pointer.
-uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
+uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
if (!PreserveFramePointer) {
// When PreserveFramePointer is off, frame pointer is allocatable,
// but different from other SOC registers, it is excluded from
@@ -2538,7 +2538,7 @@ uint Matcher::int_pressure_limit()
uint Matcher::float_pressure_limit()
{
// _FLOAT_REG_mask is generated by adlc from the float_reg register class.
-return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
+return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}

bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
@@ -1093,39 +1093,39 @@ RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
void reg_mask_init() {

_ANY_REG32_mask = _ALL_REG32_mask;
-_ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));
+_ANY_REG32_mask.remove(OptoReg::as_OptoReg(x0->as_VMReg()));

_ANY_REG_mask = _ALL_REG_mask;
-_ANY_REG_mask.SUBTRACT(_ZR_REG_mask);
+_ANY_REG_mask.subtract(_ZR_REG_mask);

_PTR_REG_mask = _ALL_REG_mask;
-_PTR_REG_mask.SUBTRACT(_ZR_REG_mask);
+_PTR_REG_mask.subtract(_ZR_REG_mask);

_NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
-_NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
+_NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);

_NO_SPECIAL_REG_mask = _ALL_REG_mask;
-_NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
+_NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

_NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
-_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
+_NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

// x27 is not allocatable when compressed oops is on
if (UseCompressedOops) {
-_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
-_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
-_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+_NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+_NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+_NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
}

// x8 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
-_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
-_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
-_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+_NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+_NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+_NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}

_NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
-_NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+_NO_SPECIAL_NO_FP_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}

void PhaseOutput::pd_perform_mach_node_analysis() {
@@ -1326,7 +1326,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}

//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;

int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -2104,10 +2104,10 @@ uint Matcher::int_pressure_limit()
// as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
// derived pointers and lastly fail to spill after reaching maximum
// number of iterations. Lowering the default pressure threshold to
-// (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
+// (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
// a high register pressure area of the code so that split_DEF can
// generate DefinitionSpillCopy for the derived pointer.
-uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
+uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
if (!PreserveFramePointer) {
// When PreserveFramePointer is off, frame pointer is allocatable,
// but different from other SOC registers, it is excluded from
@@ -2122,7 +2122,7 @@ uint Matcher::int_pressure_limit()
uint Matcher::float_pressure_limit()
{
// _FLOAT_REG_mask is generated by adlc from the float_reg register class.
-return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
+return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}

bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
@@ -471,33 +471,33 @@ void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
RegMask caller_saved;
-caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(rax->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r8->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r9->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r10->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r11->as_VMReg()));

if (UseAPX) {
-caller_saved.Insert(OptoReg::as_OptoReg(r16->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r17->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r18->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r19->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r20->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r21->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r22->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r23->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r24->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r25->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r26->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r27->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r28->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r29->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r30->as_VMReg()));
-caller_saved.Insert(OptoReg::as_OptoReg(r31->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r16->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r17->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r18->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r19->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r20->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r21->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r22->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r23->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r24->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r25->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r26->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r27->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r28->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r29->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r30->as_VMReg()));
+caller_saved.insert(OptoReg::as_OptoReg(r31->as_VMReg()));
}

int gp_spill_size = 0;
@@ -511,7 +511,7 @@ void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

if (vm_reg->is_Register()) {
-if (caller_saved.Member(opto_reg)) {
+if (caller_saved.member(opto_reg)) {
_gp_registers.append(vm_reg->as_Register());
gp_spill_size += 8;
}
@@ -500,89 +500,89 @@ void reg_mask_init() {
_ANY_REG_mask = _ALL_REG_mask;

if (PreserveFramePointer) {
-_ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
-_ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+_ANY_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+_ANY_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
}
if (need_r12_heapbase()) {
-_ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
-_ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
+_ANY_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+_ANY_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
}

_PTR_REG_mask = _ANY_REG_mask;
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()));
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(r15->as_VMReg()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));
if (!UseAPX) {
for (uint i = 0; i < sizeof(egprs)/sizeof(Register); i++) {
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
-_PTR_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()->next()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
+_PTR_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()->next()));
}
}

_STACK_OR_PTR_REG_mask = _PTR_REG_mask;
-_STACK_OR_PTR_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+_STACK_OR_PTR_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());

_PTR_REG_NO_RBP_mask = _PTR_REG_mask;
-_PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
-_PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+_PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+_PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));

_PTR_NO_RAX_REG_mask = _PTR_REG_mask;
-_PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
-_PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+_PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+_PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));

_PTR_NO_RAX_RBX_REG_mask = _PTR_NO_RAX_REG_mask;
-_PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
-_PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));
+_PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
+_PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));

_LONG_REG_mask = _PTR_REG_mask;
_STACK_OR_LONG_REG_mask = _LONG_REG_mask;
-_STACK_OR_LONG_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+_STACK_OR_LONG_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());

_LONG_NO_RAX_RDX_REG_mask = _LONG_REG_mask;
-_LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
-_LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
-_LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
-_LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));
+_LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+_LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+_LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+_LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));

_LONG_NO_RCX_REG_mask = _LONG_REG_mask;
-_LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
-_LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));
+_LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+_LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));

_LONG_NO_RBP_R13_REG_mask = _LONG_REG_mask;
-_LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
-_LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
-_LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()));
-_LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()->next()));
+_LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+_LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+_LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()));
+_LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()->next()));

_INT_REG_mask = _ALL_INT_REG_mask;
if (!UseAPX) {
for (uint i = 0; i < sizeof(egprs)/sizeof(Register); i++) {
-_INT_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
+_INT_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
}
}

if (PreserveFramePointer) {
-_INT_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+_INT_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
}
if (need_r12_heapbase()) {
-_INT_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+_INT_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()));
}

_STACK_OR_INT_REG_mask = _INT_REG_mask;
-_STACK_OR_INT_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+_STACK_OR_INT_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());

_INT_NO_RAX_RDX_REG_mask = _INT_REG_mask;
-_INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
-_INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+_INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+_INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()));

_INT_NO_RCX_REG_mask = _INT_REG_mask;
-_INT_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+_INT_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()));

_INT_NO_RBP_R13_REG_mask = _INT_REG_mask;
-_INT_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
-_INT_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()));
+_INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+_INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()));

// _FLOAT_REG_LEGACY_mask/_FLOAT_REG_EVEX_mask is generated by adlc
// from the float_reg_legacy/float_reg_evex register class.
@@ -756,7 +756,7 @@ static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst,
}

//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;

int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -1658,7 +1658,7 @@ bool Matcher::is_spillable_arg(int reg)

uint Matcher::int_pressure_limit()
{
-return (INTPRESSURE == -1) ? _INT_REG_mask.Size() : INTPRESSURE;
+return (INTPRESSURE == -1) ? _INT_REG_mask.size() : INTPRESSURE;
}

uint Matcher::float_pressure_limit()
@@ -1666,7 +1666,7 @@ uint Matcher::float_pressure_limit()
// After experiment around with different values, the following default threshold
// works best for LCM's register pressure scheduling on x64.
uint dec_count = VM_Version::supports_evex() ? 4 : 2;
-uint default_float_pressure_threshold = _FLOAT_REG_mask.Size() - dec_count;
+uint default_float_pressure_threshold = _FLOAT_REG_mask.size() - dec_count;
return (FLOATPRESSURE == -1) ? default_float_pressure_threshold : FLOATPRESSURE;
}
@@ -899,10 +899,12 @@ int ArchDesc::emit_msg(int quiet, int flag, int line, const char *fmt,

// Construct the name of the register mask.
static const char *getRegMask(const char *reg_class_name) {
-if( reg_class_name == nullptr ) return "RegMask::Empty";
+if (reg_class_name == nullptr) {
+return "RegMask::EMPTY";
+}

if (strcmp(reg_class_name,"Universe")==0) {
-return "RegMask::Empty";
+return "RegMask::EMPTY";
} else if (strcmp(reg_class_name,"stack_slots")==0) {
return "(Compile::current()->FIRST_STACK_mask())";
} else if (strcmp(reg_class_name, "dynamic")==0) {
@@ -920,7 +922,7 @@ static const char *getRegMask(const char *reg_class_name) {

// Convert a register class name to its register mask.
const char *ArchDesc::reg_class_to_reg_mask(const char *rc_name) {
-const char *reg_mask = "RegMask::Empty";
+const char* reg_mask = "RegMask::EMPTY";

if( _register ) {
RegClass *reg_class = _register->getRegClass(rc_name);
@@ -939,7 +941,7 @@ const char *ArchDesc::reg_class_to_reg_mask(const char *rc_name) {

// Obtain the name of the RegMask for an OperandForm
const char *ArchDesc::reg_mask(OperandForm &opForm) {
-const char *regMask = "RegMask::Empty";
+const char* regMask = "RegMask::EMPTY";

// Check constraints on result's register class
const char *result_class = opForm.constrained_reg_class();
@@ -968,9 +970,9 @@ const char *ArchDesc::reg_mask(InstructForm &inForm) {
abort();
}

-// Instructions producing 'Universe' use RegMask::Empty
+// Instructions producing 'Universe' use RegMask::EMPTY
if (strcmp(result,"Universe") == 0) {
-return "RegMask::Empty";
+return "RegMask::EMPTY";
}

// Lookup this result operand and get its register class

@@ -2422,7 +2422,7 @@ const char *OperandForm::constrained_reg_class() const {

// Return the register class associated with 'leaf'.
const char *OperandForm::in_reg_class(uint leaf, FormDict &globals) {
-const char *reg_class = nullptr; // "RegMask::Empty";
+const char* reg_class = nullptr; // "RegMask::EMPTY";

if((_matrule == nullptr) || (_matrule->is_chain_rule(globals))) {
reg_class = constrained_reg_class();

@@ -2837,7 +2837,7 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {
if (strcmp(first_reg_class, "stack_slots") == 0) {
fprintf(fp," return &(Compile::current()->FIRST_STACK_mask());\n");
} else if (strcmp(first_reg_class, "dynamic") == 0) {
-fprintf(fp," return &RegMask::Empty;\n");
+fprintf(fp, " return &RegMask::EMPTY;\n");
} else {
const char* first_reg_class_to_upper = toUpper(first_reg_class);
fprintf(fp," return &%s_mask();\n", first_reg_class_to_upper);
@@ -115,7 +115,7 @@ uint8_t BarrierStubC2::barrier_data() const {
void BarrierStubC2::preserve(Register r) {
const VMReg vm_reg = r->as_VMReg();
assert(vm_reg->is_Register(), "r must be a general-purpose register");
-_preserve.Insert(OptoReg::as_OptoReg(vm_reg));
+_preserve.insert(OptoReg::as_OptoReg(vm_reg));
}

void BarrierStubC2::dont_preserve(Register r) {
@@ -124,7 +124,7 @@ void BarrierStubC2::dont_preserve(Register r) {
// Subtract the given register and all its sub-registers (e.g. {R11, R11_H}
// for r11 in aarch64).
do {
-_preserve.Remove(OptoReg::as_OptoReg(vm_reg));
+_preserve.remove(OptoReg::as_OptoReg(vm_reg));
vm_reg = vm_reg->next();
} while (vm_reg->is_Register() && !vm_reg->is_concrete());
}
@@ -1171,7 +1171,7 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
// Initialize to union of successors
for (uint i = 0; i < block->_num_succs; i++) {
const uint succ_id = block->_succs[i]->_pre_order;
-new_live.OR(live[succ_id]);
+new_live.or_with(live[succ_id]);
}

// Walk block backwards, computing liveness
@@ -1182,7 +1182,7 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
if (!bs_state->needs_livein_data()) {
RegMask* const regs = bs_state->live(node);
if (regs != nullptr) {
-regs->OR(new_live);
+regs->or_with(new_live);
}
}

@@ -1190,10 +1190,10 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
if (first != OptoReg::Bad) {
-new_live.Remove(first);
+new_live.remove(first);
}
if (second != OptoReg::Bad) {
-new_live.Remove(second);
+new_live.remove(second);
}

// Add use bits
@@ -1202,10 +1202,10 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
if (first != OptoReg::Bad) {
-new_live.Insert(first);
+new_live.insert(first);
}
if (second != OptoReg::Bad) {
-new_live.Insert(second);
+new_live.insert(second);
}
}

@@ -1213,16 +1213,16 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
if (bs_state->needs_livein_data()) {
RegMask* const regs = bs_state->live(node);
if (regs != nullptr) {
-regs->OR(new_live);
+regs->or_with(new_live);
}
}
}

// Now at block top, see if we have any changes
-new_live.SUBTRACT(old_live);
-if (!new_live.is_Empty()) {
+new_live.subtract(old_live);
+if (!new_live.is_empty()) {
// Liveness has refined, update and propagate to prior blocks
-old_live.OR(new_live);
+old_live.or_with(new_live);
for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
@@ -72,7 +72,7 @@ void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

//------------------------------match------------------------------------------
@@ -82,7 +82,7 @@ Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
-return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
case TypeFunc::FramePtr:
return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
case TypeFunc::ReturnAdr:
@@ -777,12 +777,12 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
-return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
+return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);

case TypeFunc::Parms+1: // For LONG & DOUBLE returns
assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
// 2nd half of doubles and longs
-return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
+return new MachProjNode(this,proj->_con, RegMask::EMPTY, (uint)OptoReg::Bad);

case TypeFunc::Parms: { // Normal returns
uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
@@ -798,14 +798,14 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
if(OptoReg::is_valid(regs.second())) {
for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
-rm.Insert(r);
+rm.insert(r);
}
}
}
}

if( OptoReg::is_valid(regs.second()) )
-rm.Insert( regs.second() );
+rm.insert(regs.second());
return new MachProjNode(this,proj->_con,rm,ideal_reg);
}

@@ -1492,12 +1492,14 @@ void SafePointNode::dump_spec(outputStream *st) const {
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
-if( idx < TypeFunc::Parms ) return RegMask::Empty;
+if (idx < TypeFunc::Parms) {
+return RegMask::EMPTY;
+}
// Values outside the domain represent debug info
return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

@@ -1608,7 +1610,7 @@ const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
@@ -1659,7 +1661,7 @@ const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
}

const RegMask &SafePointScalarMergeNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

uint SafePointScalarMergeNode::match_edge(uint idx) const {
@@ -1014,7 +1014,7 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
}

const RegMask &RegionNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

#ifndef PRODUCT
@@ -2859,13 +2859,15 @@ bool PhiNode::is_tripcount(BasicType bt) const {

//------------------------------out_RegMask------------------------------------
const RegMask &PhiNode::in_RegMask(uint i) const {
-return i ? out_RegMask() : RegMask::Empty;
+return i ? out_RegMask() : RegMask::EMPTY;
}

const RegMask &PhiNode::out_RegMask() const {
uint ideal_reg = _type->ideal_reg();
assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
-if( ideal_reg == 0 ) return RegMask::Empty;
+if (ideal_reg == 0) {
+return RegMask::EMPTY;
+}
assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
}
@@ -2892,22 +2894,22 @@ Node* GotoNode::Identity(PhaseGVN* phase) {
}

const RegMask &GotoNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

//=============================================================================
const RegMask &JumpNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

//=============================================================================
const RegMask &JProjNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

//=============================================================================
const RegMask &CProjNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

@@ -741,7 +741,7 @@ public:
// Fake the incoming arguments mask for blackholes: accept all registers
// and all stack slots. This would avoid any redundant register moves
// for blackhole inputs.
-return RegMask::All;
+return RegMask::ALL;
}
#ifndef PRODUCT
virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
@@ -49,9 +49,11 @@ void LRG::dump() const {
_mask.dump();
if( _msize_valid ) {
if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
-else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
+else {
+tty->print(", #!!!_%d_vs_%d ", _mask_size, _mask.size());
+}
} else {
-tty->print(", #?(%d) ",_mask.Size());
+tty->print(", #?(%d) ", _mask.size());
}

tty->print("EffDeg: ");
@@ -741,7 +743,7 @@ void PhaseChaitin::Register_Allocate() {
}
} else { // Misaligned; extract 2 bits
OptoReg::Name hi = lrg.reg(); // Get hi register
-lrg.Remove(hi); // Yank from mask
+lrg.remove(hi); // Yank from mask
int lo = lrg.mask().find_first_elem(); // Find lo
set_pair(i, hi, lo);
}
@@ -773,7 +775,7 @@ void PhaseChaitin::de_ssa() {
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
-_lrg_map.map(n->_idx, !rm.is_Empty() ? lr_counter++ : 0);
+_lrg_map.map(n->_idx, !rm.is_empty() ? lr_counter++ : 0);
}
}

@@ -794,7 +796,7 @@ void PhaseChaitin::mark_ssa() {
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
-_lrg_map.map(n->_idx, !rm.is_Empty() ? n->_idx : 0);
+_lrg_map.map(n->_idx, !rm.is_empty() ? n->_idx : 0);
max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
}
}
@@ -879,7 +881,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

// Limit result register mask to acceptable registers
const RegMask &rm = n->out_RegMask();
-lrg.AND( rm );
+lrg.and_with(rm);

uint ireg = n->ideal_reg();
assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
@@ -935,7 +937,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
switch (ireg) {
case MachProjNode::fat_proj:
// Fat projections have size equal to number of registers killed
-lrg.set_num_regs(rm.Size());
+lrg.set_num_regs(rm.size());
lrg.set_reg_pressure(lrg.num_regs());
lrg._fat_proj = 1;
lrg._is_bound = 1;
@@ -1126,7 +1128,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Later, AFTER aggressive, this live range will have to spill
// but the spiller handles slow-path calls very nicely.
} else {
-lrg.AND( rm );
+lrg.and_with(rm);
}

// Check for bound register masks
@@ -1164,7 +1166,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
if (!is_vect && !n->is_SpillCopy() &&
(lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
lrgmask.is_misaligned_pair()) {
-lrg.Clear();
+lrg.clear();
}

// Check for maximum frequency value
@@ -1405,7 +1407,7 @@ void PhaseChaitin::Simplify( ) {

// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG& lrg, OptoReg::Name reg) {
-if (lrg.mask().can_represent(reg) && lrg.mask().Member(reg)) {
+if (lrg.mask().can_represent(reg) && lrg.mask().member(reg)) {
// RA uses OptoReg which represent the highest element of a registers set.
// For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
// in which XMMd is used by RA to represent such vectors. A double value
@@ -1459,7 +1461,7 @@ static OptoReg::Name find_first_set(LRG& lrg, RegMask& mask) {
return assigned;
} else {
// Remove more for each iteration
-mask.Remove(assigned - num_regs + 1); // Unmask the lowest reg
+mask.remove(assigned - num_regs + 1); // Unmask the lowest reg
mask.clear_to_sets(RegMask::SlotsPerVecA); // Align by SlotsPerVecA bits
assigned = mask.find_first_set(lrg, num_regs);
}
@@ -1510,7 +1512,7 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {
// Choose a color which is legal for him
ResourceMark rm(C->regmask_arena());
RegMask tempmask(lrg.mask(), C->regmask_arena());
-tempmask.AND(lrgs(copy_lrg).mask());
+tempmask.and_with(lrgs(copy_lrg).mask());
tempmask.clear_to_sets(lrg.num_regs());
OptoReg::Name reg = find_first_set(lrg, tempmask);
if (OptoReg::is_valid(reg))
@@ -1533,9 +1535,9 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {
if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
// This 'Remove; find; Insert' idiom is an expensive way to find the
// SECOND element in the mask.
-lrg.Remove(reg);
+lrg.remove(reg);
OptoReg::Name reg2 = lrg.mask().find_first_elem();
-lrg.Insert(reg);
+lrg.insert(reg);
if (OptoReg::is_reg(reg2)) {
reg = reg2;
}
@@ -1545,8 +1547,8 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {

// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color(LRG& lrg) {
-assert(C->in_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP - 1)), "must not allocate stack0 (inside preserve area)");
-assert(C->out_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP + 0)), "must not allocate stack0 (inside preserve area)");
+assert(C->in_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().member(OptoReg::Name(_matcher._old_SP - 1)), "must not allocate stack0 (inside preserve area)");
+assert(C->out_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().member(OptoReg::Name(_matcher._old_SP + 0)), "must not allocate stack0 (inside preserve area)");

if( lrg.num_regs() == 1 || // Common Case
!lrg._fat_proj ) // Aligned+adjacent pairs ok
@@ -1622,20 +1624,20 @@ uint PhaseChaitin::Select( ) {
// at retry_next_chunk.
if (nreg < LRG::SPILL_REG) {
#ifndef PRODUCT
-uint size = lrg->mask().Size();
+uint size = lrg->mask().size();
ResourceMark rm(C->regmask_arena());
RegMask trace_mask(lrg->mask(), C->regmask_arena());
#endif
-lrg->SUBTRACT_inner(nlrg.mask());
+lrg->subtract_inner(nlrg.mask());
#ifndef PRODUCT
-if (trace_spilling() && lrg->mask().Size() != size) {
+if (trace_spilling() && lrg->mask().size() != size) {
ttyLocker ttyl;
tty->print("L%d ", lidx);
trace_mask.dump();
tty->print(" intersected L%d ", neighbor);
nlrg.mask().dump();
tty->print(" removed ");
-trace_mask.SUBTRACT(lrg->mask());
+trace_mask.subtract(lrg->mask());
trace_mask.dump();
tty->print(" leaving ");
lrg->mask().dump();
@@ -1701,15 +1703,15 @@ uint PhaseChaitin::Select( ) {
} else {
assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
}
-lrg->Clear(); // Clear the mask
-lrg->Insert(reg); // Set regmask to match selected reg
+lrg->clear(); // Clear the mask
+lrg->insert(reg); // Set regmask to match selected reg
// For vectors and pairs, also insert the low bit of the pair
// We always choose the high bit, then mask the low bits by register size
if (lrg->is_scalable() && OptoReg::is_stack(lrg->reg())) { // stack
n_regs = lrg->scalable_reg_slots();
}
for (int i = 1; i < n_regs; i++) {
-lrg->Insert(OptoReg::add(reg,-i));
+lrg->insert(OptoReg::add(reg, -i));
}
lrg->set_mask_size(n_regs);
} else { // Else fatproj
@@ -103,11 +103,11 @@ public:

private:
RegMask _mask; // Allowed registers for this LRG
-uint _mask_size; // cache of _mask.Size();
+uint _mask_size; // cache of _mask.size();
public:
-int compute_mask_size() const { return _mask.is_infinite_stack() ? INFINITE_STACK_SIZE : _mask.Size(); }
+int compute_mask_size() const { return _mask.is_infinite_stack() ? INFINITE_STACK_SIZE : _mask.size(); }
void set_mask_size( int size ) {
-assert((size == (int)INFINITE_STACK_SIZE) || (size == (int)_mask.Size()), "");
+assert((size == (int)INFINITE_STACK_SIZE) || (size == (int)_mask.size()), "");
_mask_size = size;
#ifdef ASSERT
_msize_valid=1;
@@ -130,15 +130,15 @@ public:
const RegMask &mask() const { return _mask; }
void set_mask( const RegMask &rm ) { _mask = rm; DEBUG_ONLY(_msize_valid=0;)}
void init_mask(Arena* arena) { new (&_mask) RegMask(arena); }
-void AND( const RegMask &rm ) { _mask.AND(rm); DEBUG_ONLY(_msize_valid=0;)}
-void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); DEBUG_ONLY(_msize_valid=0;)}
-void SUBTRACT_inner(const RegMask& rm) { _mask.SUBTRACT_inner(rm); DEBUG_ONLY(_msize_valid = 0;) }
-void Clear() { _mask.Clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; }
-void Set_All() { _mask.Set_All(); DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); }
+void and_with( const RegMask &rm ) { _mask.and_with(rm); DEBUG_ONLY(_msize_valid=0;)}
+void subtract( const RegMask &rm ) { _mask.subtract(rm); DEBUG_ONLY(_msize_valid=0;)}
+void subtract_inner(const RegMask& rm) { _mask.subtract_inner(rm); DEBUG_ONLY(_msize_valid = 0;) }
+void clear() { _mask.clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; }
+void set_all() { _mask.set_all(); DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); }
bool rollover() { DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); return _mask.rollover(); }

-void Insert( OptoReg::Name reg ) { _mask.Insert(reg); DEBUG_ONLY(_msize_valid=0;) }
-void Remove( OptoReg::Name reg ) { _mask.Remove(reg); DEBUG_ONLY(_msize_valid=0;) }
+void insert( OptoReg::Name reg ) { _mask.insert(reg); DEBUG_ONLY(_msize_valid=0;) }
+void remove( OptoReg::Name reg ) { _mask.remove(reg); DEBUG_ONLY(_msize_valid=0;) }
void clear_to_sets() { _mask.clear_to_sets(_num_regs); DEBUG_ONLY(_msize_valid=0;) }

private:
@@ -624,7 +624,7 @@ private:
void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
// this pressure is only valid at this instruction, i.e. we don't need to lower
// the register pressure since the fat proj was never live before (going backwards)
-uint new_pressure = current_pressure() + fatproj_mask.Size();
+uint new_pressure = current_pressure() + fatproj_mask.size();
if (new_pressure > final_pressure()) {
_final_pressure = new_pressure;
}
@@ -118,7 +118,7 @@ void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
// Merge in the IFG
_phc._ifg->Union( lr1, lr2 );
// Combine register restrictions
-lrg1->AND(lrg2->mask());
+lrg1->and_with(lrg2->mask());
}
}
}
@@ -503,8 +503,8 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
lrgs(lr2).is_multidef() )
? NodeSentinel : src_def;
lrgs(lr2)._def = nullptr; // No def for lrg 2
-lrgs(lr2).Clear(); // Force empty mask for LRG 2
-//lrgs(lr2)._size = 0; // Live-range 2 goes dead
+lrgs(lr2).clear(); // Force empty mask for LRG 2
+// lrgs(lr2)._size = 0; // Live-range 2 goes dead
lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
lrgs(lr2)._is_oop = 0; // In particular, not an oop for GC info

@@ -570,9 +570,9 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
// If we attempt to coalesce across a bound def
if( lrgs(lidx).is_bound() ) {
// Do not let the coalesced LRG expect to get the bound color
-rm.SUBTRACT( lrgs(lidx).mask() );
+rm.subtract(lrgs(lidx).mask());
// Recompute rm_size
-rm_size = rm.Size();
+rm_size = rm.size();
//if( rm._flags ) rm_size += 1000000;
if( reg_degree >= rm_size ) return max_juint;
}
@@ -695,9 +695,9 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// intersecting their allowed register sets.
ResourceMark rm(C->regmask_arena());
RegMask mask(lrgs(lr1).mask(), C->regmask_arena());
-mask.AND(lrgs(lr2).mask());
+mask.and_with(lrgs(lr2).mask());
// Number of bits free
-uint rm_size = mask.Size();
+uint rm_size = mask.size();

if (UseFPUForSpilling && mask.is_infinite_stack() ) {
// Don't coalesce when frequency difference is large

@@ -43,8 +43,8 @@ public:
}
virtual int Opcode() const;
virtual uint hash() const;
-virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
-virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
+virtual const RegMask& out_RegMask() const { return RegMask::EMPTY; }
+virtual const RegMask& in_RegMask(uint) const { return RegMask::EMPTY; }

virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) {
return Node::Ideal(phase, can_reshape);
@@ -1449,8 +1449,9 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
// single register. Hoisting stretches the live range of the
// single register and may force spilling.
MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
-if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_Empty())
+if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_empty()) {
in_latency = true;
+}

#ifndef PRODUCT
if (trace_opto_pipelining()) {

@@ -55,7 +55,7 @@ void PhaseIFG::init( uint maxlrg ) {
for( uint i = 0; i < maxlrg; i++ ) {
_adjs[i].initialize(maxlrg);
_lrgs[i].init_mask(_arena);
-_lrgs[i].Set_All();
+_lrgs[i].set_all();
}
}

@@ -655,7 +655,7 @@ bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uin
void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
ResourceMark rm(C->regmask_arena());
RegMask mask_tmp(lrg.mask(), C->regmask_arena());
-mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
+mask_tmp.and_with(*Matcher::idealreg2regmask[op_regtype]);
pressure.check_pressure_at_fatproj(location, mask_tmp);
}

@@ -742,17 +742,17 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg,
// Leave only aligned set of bits.
r2mask.smear_to_sets(interfering_lrg.num_regs());
// It includes vector case.
-interfering_lrg.SUBTRACT(r2mask);
+interfering_lrg.subtract(r2mask);
interfering_lrg.compute_set_mask_size();
} else if (r_size != 1) {
// fat proj
-interfering_lrg.SUBTRACT(mask);
+interfering_lrg.subtract(mask);
interfering_lrg.compute_set_mask_size();
} else {
// Common case: size 1 bound removal
OptoReg::Name r_reg = mask.find_first_elem();
-if (interfering_lrg.mask().Member(r_reg)) {
-interfering_lrg.Remove(r_reg);
+if (interfering_lrg.mask().member(r_reg)) {
+interfering_lrg.remove(r_reg);
interfering_lrg.set_mask_size(interfering_lrg.mask().is_infinite_stack() ? LRG::INFINITE_STACK_SIZE : old_size - 1);
}
}
@@ -933,7 +933,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Since rematerializable DEFs are not bound but the live range is,
// some uses must be bound. If we spill live range 'r', it can
// rematerialize at each use site according to its bindings.
-if (lrg.is_bound() && !n->rematerialize() && !lrg.mask().is_Empty()) {
+if (lrg.is_bound() && !n->rematerialize() && !lrg.mask().is_empty()) {
remove_bound_register_from_interfering_live_ranges(lrg, &liveout, must_spill);
}
interfere_with_live(lid, &liveout);

@@ -82,7 +82,7 @@ const Type* IfNode::Value(PhaseGVN* phase) const {
}

const RegMask &IfNode::out_RegMask() const {
-return RegMask::Empty;
+return RegMask::EMPTY;
}

//------------------------------split_if---------------------------------------
@@ -855,12 +855,12 @@ void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& ne
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
-if( !regs.Member(r) ) { // Not already defined by the call
+if (!regs.member(r)) { // Not already defined by the call
// Save-on-call register?
if ((save_policy[r] == 'C') ||
(save_policy[r] == 'A') ||
((save_policy[r] == 'E') && exclude_soe)) {
-proj->_rout.Insert(r);
+proj->_rout.insert(r);
}
}
}
@@ -884,7 +884,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
// Schedule next to call
block->map_node(n, node_cnt++);
// Collect defined registers
-regs.OR(n->out_RegMask());
+regs.or_with(n->out_RegMask());
// Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits
@@ -907,12 +907,12 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow

// Act as if the call defines the Frame Pointer.
// Certainly the FP is alive and well after the call.
-regs.Insert(_matcher.c_frame_pointer());
+regs.insert(_matcher.c_frame_pointer());

// Set all registers killed and not already defined by the call.
uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode();
-MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+MachProjNode* proj = new MachProjNode(mcall, r_cnt + 1, RegMask::EMPTY, MachProjNode::fat_proj);
map_node_to_block(proj, block);
block->insert_node(proj, node_cnt++);

@@ -1164,10 +1164,10 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto

if (n->is_Mach() && n->as_Mach()->has_call()) {
RegMask regs;
-regs.Insert(_matcher.c_frame_pointer());
-regs.OR(n->out_RegMask());
+regs.insert(_matcher.c_frame_pointer());
+regs.or_with(n->out_RegMask());

-MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+MachProjNode* proj = new MachProjNode(n, 1, RegMask::EMPTY, MachProjNode::fat_proj);
map_node_to_block(proj, block);
block->insert_node(proj, phi_cnt++);
@@ -525,7 +525,7 @@ bool MachNode::rematerialize() const {
uint idx = oper_input_base();
if (req() > idx) {
const RegMask &rm = in_RegMask(idx);
-if (!rm.is_Empty() && rm.is_bound(ideal_reg())) {
+if (!rm.is_empty() && rm.is_bound(ideal_reg())) {
return false;
}
}
@@ -619,8 +619,11 @@ void MachNullCheckNode::save_label( Label** label, uint* block_num ) {
}

const RegMask &MachNullCheckNode::in_RegMask( uint idx ) const {
-if( idx == 0 ) return RegMask::Empty;
-else return in(1)->as_Mach()->out_RegMask();
+if (idx == 0) {
+return RegMask::EMPTY;
+} else {
+return in(1)->as_Mach()->out_RegMask();
+}
}

//=============================================================================

@@ -737,7 +737,7 @@ public:
virtual const class Type *bottom_type() const { return TypeTuple::IFBOTH; }
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual const RegMask &in_RegMask(uint) const;
-virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
+virtual const RegMask& out_RegMask() const { return RegMask::EMPTY; }
#ifndef PRODUCT
virtual const char *Name() const { return "NullCheck"; }
virtual void format( PhaseRegAlloc *, outputStream *st ) const;
@@ -769,7 +769,7 @@ public:
virtual int Opcode() const;
virtual const Type *bottom_type() const;
virtual const TypePtr *adr_type() const;
-virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
+virtual const RegMask& in_RegMask(uint) const { return RegMask::EMPTY; }
virtual const RegMask &out_RegMask() const { return _rout; }
virtual uint ideal_reg() const { return _ideal_reg; }
// Need size_of() for virtual ProjNode::clone()
@ -176,12 +176,12 @@ void Matcher::match( ) {
|
||||
if (C->failing()) {
|
||||
return;
|
||||
}
|
||||
assert(_return_addr_mask.is_Empty(),
|
||||
assert(_return_addr_mask.is_empty(),
|
||||
"return address mask must be empty initially");
|
||||
_return_addr_mask.Insert(return_addr());
|
||||
_return_addr_mask.insert(return_addr());
|
||||
#ifdef _LP64
|
||||
// Pointers take 2 slots in 64-bit land
|
||||
_return_addr_mask.Insert(OptoReg::add(return_addr(),1));
|
||||
_return_addr_mask.insert(OptoReg::add(return_addr(), 1));
|
||||
#endif
|
||||
|
||||
// Map a Java-signature return type into return register-value
|
||||
@ -197,7 +197,7 @@ void Matcher::match( ) {
|
||||
// And mask for same
|
||||
_return_value_mask = RegMask(regs.first());
|
||||
if( OptoReg::is_valid(regs.second()) )
|
||||
_return_value_mask.Insert(regs.second());
|
||||
_return_value_mask.insert(regs.second());
|
||||
}
|
||||
|
||||
// ---------------
|
||||
@ -261,7 +261,7 @@ void Matcher::match( ) {
|
||||
assert( is_even(_in_arg_limit), "out_preserve must be even" );
|
||||
for( i = 0; i < argcnt; i++ ) {
|
||||
// Permit args to have no register
|
||||
_calling_convention_mask[i].Clear();
|
||||
_calling_convention_mask[i].clear();
|
||||
if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
|
||||
_parm_regs[i].set_bad();
|
||||
continue;
|
||||
@ -273,11 +273,11 @@ void Matcher::match( ) {
|
||||
|
||||
OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
|
||||
if( OptoReg::is_valid(reg1))
|
||||
_calling_convention_mask[i].Insert(reg1);
|
||||
_calling_convention_mask[i].insert(reg1);
|
||||
|
||||
OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
|
||||
if( OptoReg::is_valid(reg2))
|
||||
_calling_convention_mask[i].Insert(reg2);
|
||||
_calling_convention_mask[i].insert(reg2);
|
||||
|
||||
// Saved biased stack-slot register number
|
||||
_parm_regs[i].set_pair(reg2, reg1);
|
||||
@ -422,9 +422,9 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
|
||||
new (rms + i) RegMask(Compile::current()->comp_arena());
|
||||
}
|
||||
// Do all the pre-defined register masks
|
||||
rms[TypeFunc::Control ] = RegMask::Empty;
|
||||
rms[TypeFunc::I_O ] = RegMask::Empty;
|
||||
rms[TypeFunc::Memory ] = RegMask::Empty;
|
||||
rms[TypeFunc::Control ] = RegMask::EMPTY;
|
||||
rms[TypeFunc::I_O ] = RegMask::EMPTY;
|
||||
rms[TypeFunc::Memory ] = RegMask::EMPTY;
|
||||
rms[TypeFunc::ReturnAdr] = ret_adr;
|
||||
rms[TypeFunc::FramePtr ] = fp;
|
||||
return rms;
|
||||
@ -471,15 +471,15 @@ void Matcher::init_first_stack_mask() {
assert(index == NOF_STACK_MASKS, "wrong size");

// At first, start with the empty mask
C->FIRST_STACK_mask().Clear();
C->FIRST_STACK_mask().clear();

// Add in the incoming argument area
OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
for (OptoReg::Name i = init_in; i < _in_arg_limit; i = OptoReg::add(i, 1)) {
C->FIRST_STACK_mask().Insert(i);
C->FIRST_STACK_mask().insert(i);
}
// Add in all bits past the outgoing argument area
C->FIRST_STACK_mask().Set_All_From(_out_arg_limit);
C->FIRST_STACK_mask().set_all_from(_out_arg_limit);

// Make spill masks. Registers for their class, plus FIRST_STACK_mask.
RegMask aligned_stack_mask(C->FIRST_STACK_mask(), C->comp_arena());
@ -491,41 +491,41 @@ void Matcher::init_first_stack_mask() {
*idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
*idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
idealreg2spillmask[Op_RegN]->or_with(C->FIRST_STACK_mask());
idealreg2spillmask[Op_RegP]->or_with(aligned_stack_mask);
#else
idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
idealreg2spillmask[Op_RegP]->or_with(C->FIRST_STACK_mask());
#endif
*idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
idealreg2spillmask[Op_RegI]->or_with(C->FIRST_STACK_mask());
*idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
idealreg2spillmask[Op_RegL]->or_with(aligned_stack_mask);
*idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
idealreg2spillmask[Op_RegF]->or_with(C->FIRST_STACK_mask());
*idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
idealreg2spillmask[Op_RegD]->or_with(aligned_stack_mask);

if (Matcher::has_predicated_vectors()) {
*idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
idealreg2spillmask[Op_RegVectMask]->or_with(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
*idealreg2spillmask[Op_RegVectMask] = RegMask::EMPTY;
}

if (Matcher::vector_size_supported(T_BYTE,4)) {
*idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
idealreg2spillmask[Op_VecS]->or_with(C->FIRST_STACK_mask());
} else {
*idealreg2spillmask[Op_VecS] = RegMask::Empty;
*idealreg2spillmask[Op_VecS] = RegMask::EMPTY;
}

if (Matcher::vector_size_supported(T_FLOAT,2)) {
// For VecD we need dual alignment and 8 bytes (2 slots) for spills.
// RA guarantees such alignment since it is needed for Double and Long values.
*idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
idealreg2spillmask[Op_VecD]->or_with(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecD] = RegMask::Empty;
*idealreg2spillmask[Op_VecD] = RegMask::EMPTY;
}

if (Matcher::vector_size_supported(T_FLOAT,4)) {
@ -538,45 +538,45 @@ void Matcher::init_first_stack_mask() {
// otherwise vector spills could stomp over stack slots in caller frame.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
aligned_stack_mask.Remove(in);
aligned_stack_mask.remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
*idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
idealreg2spillmask[Op_VecX]->or_with(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecX] = RegMask::Empty;
*idealreg2spillmask[Op_VecX] = RegMask::EMPTY;
}

if (Matcher::vector_size_supported(T_FLOAT,8)) {
// For VecY we need octo alignment and 32 bytes (8 slots) for spills.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
aligned_stack_mask.Remove(in);
aligned_stack_mask.remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
*idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
idealreg2spillmask[Op_VecY]->or_with(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecY] = RegMask::Empty;
*idealreg2spillmask[Op_VecY] = RegMask::EMPTY;
}

if (Matcher::vector_size_supported(T_FLOAT,16)) {
// For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
aligned_stack_mask.Remove(in);
aligned_stack_mask.remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
*idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
idealreg2spillmask[Op_VecZ]->or_with(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecZ] = RegMask::Empty;
*idealreg2spillmask[Op_VecZ] = RegMask::EMPTY;
}

if (Matcher::supports_scalable_vector()) {
@ -586,7 +586,7 @@ void Matcher::init_first_stack_mask() {
// Exclude last input arg stack slots to avoid spilling vector register there,
// otherwise RegVectMask spills could stomp over stack slots in caller frame.
for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
scalable_stack_mask.Remove(in);
scalable_stack_mask.remove(in);
in = OptoReg::add(in, -1);
}

@ -594,13 +594,13 @@ void Matcher::init_first_stack_mask() {
scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
*idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
idealreg2spillmask[Op_RegVectMask]->or_with(scalable_stack_mask);
}

// Exclude last input arg stack slots to avoid spilling vector register there,
// otherwise vector spills could stomp over stack slots in caller frame.
for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
scalable_stack_mask.Remove(in);
scalable_stack_mask.remove(in);
in = OptoReg::add(in, -1);
}

@ -608,9 +608,9 @@ void Matcher::init_first_stack_mask() {
scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
*idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
idealreg2spillmask[Op_VecA]->or_with(scalable_stack_mask);
} else {
*idealreg2spillmask[Op_VecA] = RegMask::Empty;
*idealreg2spillmask[Op_VecA] = RegMask::EMPTY;
}

if (UseFPUForSpilling) {
@ -618,20 +618,20 @@ void Matcher::init_first_stack_mask() {
// symmetric and that the registers involved are the same size.
// On sparc for instance we may have to use 64 bit moves will
// kill 2 registers when used with F0-F31.
idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
idealreg2spillmask[Op_RegI]->or_with(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegF]->or_with(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegN]->or_with(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegD]);
#else
idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegF]);
#ifdef ARM
// ARM has support for moving 64bit values between a pair of
// integer registers and a double register
idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
#endif
#endif
}
@ -659,20 +659,20 @@ void Matcher::init_first_stack_mask() {
bool exclude_soe = !Compile::current()->is_method_compilation();
RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;

idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_RegN]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegI]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegL]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegF]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegD]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegP]->subtract(*caller_save_mask);
idealreg2debugmask[Op_RegVectMask]->subtract(*caller_save_mask);

idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);
idealreg2debugmask[Op_VecA]->subtract(*caller_save_mask);
idealreg2debugmask[Op_VecS]->subtract(*caller_save_mask);
idealreg2debugmask[Op_VecD]->subtract(*caller_save_mask);
idealreg2debugmask[Op_VecX]->subtract(*caller_save_mask);
idealreg2debugmask[Op_VecY]->subtract(*caller_save_mask);
idealreg2debugmask[Op_VecZ]->subtract(*caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
@ -718,7 +718,7 @@ void Matcher::Fixup_Save_On_Entry( ) {
reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
// Need two slots for ptrs in 64-bit land
reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
reth_rms[TypeFunc::Parms].insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
}

@ -802,12 +802,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i+1] == Op_RegF &&
is_save_on_entry(i+1) ) {
// Add other bit for double
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
proj_cnt += 2; // Skip 2 for doubles
}
@ -815,12 +815,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i-1] == Op_RegF &&
_register_save_type[i ] == Op_RegF &&
is_save_on_entry(i-1) ) {
ret_rms [ ret_edge_cnt] = RegMask::Empty;
reth_rms [ reth_edge_cnt] = RegMask::Empty;
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
halt_rms [ halt_edge_cnt] = RegMask::Empty;
ret_rms [ ret_edge_cnt] = RegMask::EMPTY;
reth_rms [ reth_edge_cnt] = RegMask::EMPTY;
tail_call_rms[tail_call_edge_cnt] = RegMask::EMPTY;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::EMPTY;
forw_exc_rms [ forw_exc_edge_cnt] = RegMask::EMPTY;
halt_rms [ halt_edge_cnt] = RegMask::EMPTY;
mproj = C->top();
}
// Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
@ -830,12 +830,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i+1] == Op_RegI &&
is_save_on_entry(i+1) ) {
// Add other bit for long
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
proj_cnt += 2; // Skip 2 for longs
}
@ -843,12 +843,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i-1] == Op_RegI &&
_register_save_type[i ] == Op_RegI &&
is_save_on_entry(i-1) ) {
ret_rms [ ret_edge_cnt] = RegMask::Empty;
reth_rms [ reth_edge_cnt] = RegMask::Empty;
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
halt_rms [ halt_edge_cnt] = RegMask::Empty;
ret_rms [ ret_edge_cnt] = RegMask::EMPTY;
reth_rms [ reth_edge_cnt] = RegMask::EMPTY;
tail_call_rms[tail_call_edge_cnt] = RegMask::EMPTY;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::EMPTY;
forw_exc_rms [ forw_exc_edge_cnt] = RegMask::EMPTY;
halt_rms [ halt_edge_cnt] = RegMask::EMPTY;
mproj = C->top();
} else {
// Make a projection for it off the Start
@ -878,31 +878,31 @@ void Matcher::init_spill_mask( Node *ret ) {
c_frame_ptr_mask = RegMask(c_frame_pointer());
#ifdef _LP64
// pointers are twice as big
c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
c_frame_ptr_mask.insert(OptoReg::add(c_frame_pointer(), 1));
#endif

// Start at OptoReg::stack0()
STACK_ONLY_mask.Clear();
STACK_ONLY_mask.clear();
// STACK_ONLY_mask is all stack bits
STACK_ONLY_mask.Set_All_From(OptoReg::stack2reg(0));
STACK_ONLY_mask.set_all_from(OptoReg::stack2reg(0));

for (OptoReg::Name i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg);
i = OptoReg::add(i, 1)) {
// Copy the register names over into the shared world.
// SharedInfo::regName[i] = regName[i];
// Handy RegMasks per machine register
mreg2regmask[i].Insert(i);
mreg2regmask[i].insert(i);

// Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A') {
caller_save_regmask.Insert(i);
caller_save_regmask.insert(i);
}
// Exclude save-on-entry registers from debug masks for stub compilations.
if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
_register_save_policy[i] == 'E') {
caller_save_regmask_exclude_soe.Insert(i);
caller_save_regmask_exclude_soe.insert(i);
}
}

@ -1315,17 +1315,17 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
rm->Insert(r);
rm->insert(r);
}
}
// Grab first register, adjust stack slots and insert in mask.
OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
if (OptoReg::is_valid(reg1))
rm->Insert( reg1 );
rm->insert(reg1);
// Grab second register (if any), adjust stack slots and insert in mask.
OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
if (OptoReg::is_valid(reg2))
rm->Insert( reg2 );
rm->insert(reg2);
} // End of for all arguments
}

@ -1342,11 +1342,11 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
// is excluded on the max-per-method basis, debug info cannot land in
// this killed area.
uint r_cnt = mcall->tf()->range()->cnt();
MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
MachProjNode* proj = new MachProjNode(mcall, r_cnt + 10000, RegMask::EMPTY, MachProjNode::fat_proj);
for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++) {
proj->_rout.Insert(OptoReg::Name(i));
proj->_rout.insert(OptoReg::Name(i));
}
if (!proj->_rout.is_Empty()) {
if (!proj->_rout.is_empty()) {
push_projection(proj);
}
}

@ -4325,7 +4325,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::Memory:
return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
return new MachProjNode(this, proj->_con, RegMask::EMPTY, MachProjNode::unmatched_proj);
}
ShouldNotReachHere();
return nullptr;
@ -4572,7 +4572,7 @@ const RegMask &InitializeNode::in_RegMask(uint idx) const {
// This edge should be set to top, by the set_complete. But be conservative.
if (idx == InitializeNode::RawAddress)
return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
return RegMask::Empty;
return RegMask::EMPTY;
}

Node* InitializeNode::memory(uint alias_idx) {
@ -5784,7 +5784,7 @@ void MergeMemNode::set_base_memory(Node *new_base) {

//------------------------------out_RegMask------------------------------------
const RegMask &MergeMemNode::out_RegMask() const {
return RegMask::Empty;
return RegMask::EMPTY;
}

//------------------------------dump_spec--------------------------------------

@ -36,7 +36,7 @@
//=============================================================================
//------------------------------MultiNode--------------------------------------
const RegMask &MultiNode::out_RegMask() const {
return RegMask::Empty;
return RegMask::EMPTY;
}

Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); }
@ -185,7 +185,7 @@ const Type* ProjNode::Value(PhaseGVN* phase) const {
//------------------------------out_RegMask------------------------------------
// Pass the buck uphill
const RegMask &ProjNode::out_RegMask() const {
return RegMask::Empty;
return RegMask::EMPTY;
}

//------------------------------ideal_reg--------------------------------------

@ -2800,12 +2800,12 @@ uint Node::match_edge(uint idx) const {
// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
ShouldNotCallThis();
return RegMask::Empty;
return RegMask::EMPTY;
}

const RegMask &Node::in_RegMask(uint) const {
ShouldNotCallThis();
return RegMask::Empty;
return RegMask::EMPTY;
}

void Node_Array::grow(uint i) {

@ -173,7 +173,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre
const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def));
OptoReg::Name def_reg = def_lrg.reg();
const RegMask &use_mask = n->in_RegMask(idx);
bool can_use = use_mask.Member(def_reg);
bool can_use = use_mask.member(def_reg);
if (!RegMask::is_vector(def->ideal_reg())) {
// Check for a copy to or from a misaligned pair.
// It is workaround for a sparc with misaligned pairs.
@ -678,7 +678,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
int n_regs = RegMask::num_registers(def_ideal_reg, lrgs(_lrg_map.live_range_id(def)));
for (int l = 1; l < n_regs; l++) {
OptoReg::Name ureg_lo = OptoReg::add(ureg,-l);
bool is_adjacent = lrgs(useidx).mask().Member(ureg_lo);
bool is_adjacent = lrgs(useidx).mask().member(ureg_lo);
assert(is_adjacent || OptoReg::is_reg(ureg_lo),
"only registers can be non-adjacent");
if (value[ureg_lo] == nullptr && is_adjacent) { // Nearly always adjacent
@ -762,13 +762,13 @@ void PhaseChaitin::post_allocate_copy_removal() {
// If the value occupies a register pair, record same info
// in both registers.
OptoReg::Name nreg_lo = OptoReg::add(nreg,-1);
bool is_adjacent = lrgs(lidx).mask().Member(nreg_lo);
bool is_adjacent = lrgs(lidx).mask().member(nreg_lo);
assert(is_adjacent || OptoReg::is_reg(nreg_lo), "only registers can be non-adjacent");
if (!is_adjacent) { // Nearly always adjacent
// Sparc occasionally has non-adjacent pairs.
// Find the actual other value
RegMask tmp = lrgs(lidx).mask();
tmp.Remove(nreg);
tmp.remove(nreg);
nreg_lo = tmp.find_first_elem();
}
if (value[nreg] != val || value[nreg_lo] != val) {

@ -476,7 +476,7 @@ bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
return true; // Found 1st use!
}
}
if (!n->out_RegMask().is_Empty()) {
if (!n->out_RegMask().is_empty()) {
return false;
}
}
@ -1038,7 +1038,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// bound use if we can't rematerialize the def, or if we need the
// split to form a misaligned pair.
if (!umask.is_infinite_stack() &&
(int)umask.Size() <= lrgs(useidx).num_regs() &&
(int)umask.size() <= lrgs(useidx).num_regs() &&
(!def->rematerialize() ||
(!is_vect && umask.is_misaligned_pair()))) {
// These need a Split regardless of overlap or pressure
@ -1128,7 +1128,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if( n->is_SpillCopy() ) {
ResourceMark rm(C->regmask_arena());
RegMask tmp_rm(umask, C->regmask_arena());
tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
tmp_rm.subtract(Matcher::STACK_ONLY_mask);
if( dmask.overlap(tmp_rm) ) {
if( def != n->in(inpidx) ) {
n->set_req(inpidx, def);

@ -47,9 +47,9 @@ void OptoReg::dump(int r, outputStream *st) {


//=============================================================================
const RegMask RegMask::Empty;
const RegMask RegMask::EMPTY;

const RegMask RegMask::All(
const RegMask RegMask::ALL(
# define BODY(I) -1,
FORALL_BODY
# undef BODY
@ -126,7 +126,7 @@ void RegMask::clear_to_pairs() {
}

bool RegMask::is_misaligned_pair() const {
return Size() == 2 && !is_aligned_pairs();
return size() == 2 && !is_aligned_pairs();
}

bool RegMask::is_aligned_pairs() const {
@ -227,7 +227,7 @@ bool RegMask::is_bound(uint ireg) const {
// for current regmask, where reg is the highest number.
bool RegMask::is_valid_reg(OptoReg::Name reg, const int size) const {
for (int i = 0; i < size; i++) {
if (!Member(reg - i)) {
if (!member(reg - i)) {
return false;
}
}

@ -449,7 +449,7 @@ public:
RegMask(OptoReg::Name reg,
Arena* arena DEBUG_ONLY(COMMA bool read_only = false))
: RegMask(arena DEBUG_ONLY(COMMA read_only)) {
Insert(reg);
insert(reg);
}
explicit RegMask(OptoReg::Name reg) : RegMask(reg, nullptr) {}

@ -473,7 +473,7 @@ public:
// End deep copying
// ----------------

bool Member(OptoReg::Name reg) const {
bool member(OptoReg::Name reg) const {
reg = reg - offset_bits();
if (reg < 0) {
return false;
@ -486,7 +486,7 @@ public:
}

// Empty mask check. Ignores registers included through the infinite_stack flag.
bool is_Empty() const {
bool is_empty() const {
assert(valid_watermarks(), "sanity");
for (unsigned i = _lwm; i <= _hwm; i++) {
if (rm_word(i) != 0) {
@ -642,7 +642,7 @@ public:
bool is_UP() const;

// Clear a register mask. Does not clear any offset.
void Clear() {
void clear() {
_lwm = rm_word_max_index();
_hwm = 0;
set_range(0, 0, _rm_size_in_words);
@ -651,13 +651,13 @@ public:
}

// Fill a register mask with 1's
void Set_All() {
void set_all() {
assert(_offset == 0, "offset non-zero");
Set_All_From_Offset();
set_all_from_offset();
}

// Fill a register mask with 1's from the current offset.
void Set_All_From_Offset() {
void set_all_from_offset() {
_lwm = 0;
_hwm = rm_word_max_index();
set_range(0, 0xFF, _rm_size_in_words);
@ -666,7 +666,7 @@ public:
}

// Fill a register mask with 1's starting from the given register.
void Set_All_From(OptoReg::Name reg) {
void set_all_from(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg != OptoReg::Bad, "sanity");
assert(reg != OptoReg::Special, "sanity");
@ -689,7 +689,7 @@ public:
}

// Insert register into mask
void Insert(OptoReg::Name reg) {
void insert(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg != OptoReg::Bad, "sanity");
assert(reg != OptoReg::Special, "sanity");
@ -706,7 +706,7 @@ public:
}

// Remove register from mask
void Remove(OptoReg::Name reg) {
void remove(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg >= 0, "register outside mask");
assert(reg < (int)rm_size_in_bits(), "register outside mask");
@ -714,8 +714,8 @@ public:
rm_word(r >> LogBitsPerWord) &= ~(uintptr_t(1) << (r & WORD_BIT_MASK));
}

// OR 'rm' into 'this'
void OR(const RegMask &rm) {
// Or 'rm' into 'this'
void or_with(const RegMask& rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@ -736,8 +736,8 @@ public:
assert(valid_watermarks(), "sanity");
}

// AND 'rm' into 'this'
void AND(const RegMask &rm) {
// And 'rm' into 'this'
void and_with(const RegMask& rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@ -768,7 +768,7 @@ public:
}

// Subtract 'rm' from 'this'.
void SUBTRACT(const RegMask &rm) {
void subtract(const RegMask& rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@ -791,7 +791,7 @@ public:
// Subtract 'rm' from 'this', but ignore everything in 'rm' that does not
// overlap with us and do not modify our infinite_stack flag. Supports masks of
// differing offsets. Does not support 'rm' with the infinite_stack flag set.
void SUBTRACT_inner(const RegMask& rm) {
void subtract_inner(const RegMask& rm) {
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
assert(!rm.is_infinite_stack(), "not supported");
// Various translations due to differing offsets
@ -821,12 +821,12 @@ public:
return false;
}
_offset += _rm_size_in_words;
Set_All_From_Offset();
set_all_from_offset();
return true;
}

// Compute size of register mask: number of bits
uint Size() const {
uint size() const {
uint sum = 0;
assert(valid_watermarks(), "sanity");
for (unsigned i = _lwm; i <= _hwm; i++) {
@ -895,8 +895,8 @@ public:
void dump_hex(outputStream* st = tty) const; // Print a mask (raw hex)
#endif

static const RegMask Empty; // Common empty mask
static const RegMask All; // Common all mask
static const RegMask EMPTY; // Common empty mask
static const RegMask ALL; // Common all mask

bool can_represent(OptoReg::Name reg, unsigned int size = 1) const {
reg = reg - offset_bits();

@ -88,5 +88,5 @@ const Type* HaltNode::Value(PhaseGVN* phase) const {
}

const RegMask &HaltNode::out_RegMask() const {
return RegMask::Empty;
return RegMask::EMPTY;
}
File diff suppressed because it is too large
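For orientation, a minimal standalone sketch (not part of this change) of the naming style the commit adopts: the former RegMask spellings (Insert, Remove, Member, is_Empty, Clear, OR, SUBTRACT, Size, Empty, All) become lower-case HotSpot-style names (insert, remove, member, is_empty, clear, or_with, subtract, size, EMPTY, ALL). The ToyMask class below is a hypothetical, single-word simplification for illustration only; the real RegMask is arena-backed and multi-word.

// ToyMask: hypothetical stand-in illustrating the renamed mask API surface only.
#include <cassert>
#include <cstdint>

class ToyMask {
  uint64_t _bits = 0;                              // one word is enough for a sketch
public:
  void clear()                     { _bits = 0; }                                                     // was Clear
  void insert(int reg)             { assert(reg >= 0 && reg < 64); _bits |= uint64_t(1) << reg; }     // was Insert
  void remove(int reg)             { assert(reg >= 0 && reg < 64); _bits &= ~(uint64_t(1) << reg); }  // was Remove
  bool member(int reg) const       { return reg >= 0 && reg < 64 && ((_bits >> reg) & 1); }           // was Member
  bool is_empty() const            { return _bits == 0; }                                             // was is_Empty
  void or_with(const ToyMask& rm)  { _bits |= rm._bits; }                                             // was OR
  void subtract(const ToyMask& rm) { _bits &= ~rm._bits; }                                            // was SUBTRACT
  unsigned size() const {                                                                             // was Size: bit count
    unsigned n = 0;
    for (uint64_t b = _bits; b != 0; b &= b - 1) { n++; }
    return n;
  }
};

int main() {
  ToyMask debug_mask, caller_save;
  debug_mask.insert(3);
  debug_mask.insert(7);
  caller_save.insert(7);
  debug_mask.subtract(caller_save);   // reads like idealreg2debugmask[...]->subtract(*caller_save_mask)
  assert(debug_mask.member(3) && !debug_mask.member(7));
  assert(debug_mask.size() == 1 && !debug_mask.is_empty());
  return 0;
}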