diff --git a/src/hotspot/share/adlc/output_h.cpp b/src/hotspot/share/adlc/output_h.cpp index e3fde235443..0ef8b19c79f 100644 --- a/src/hotspot/share/adlc/output_h.cpp +++ b/src/hotspot/share/adlc/output_h.cpp @@ -98,7 +98,7 @@ void ArchDesc::buildMachRegisterNumbers(FILE *fp_hpp) { } fprintf(fp_hpp, "\n// Size of register-mask in ints\n"); - fprintf(fp_hpp, "#define RM_SIZE %d\n", RegisterForm::RegMask_Size()); + fprintf(fp_hpp, "#define RM_SIZE_IN_INTS %d\n", RegisterForm::RegMask_Size()); fprintf(fp_hpp, "// Unroll factor for loops over the data in a RegMask\n"); fprintf(fp_hpp, "#define FORALL_BODY "); int len = RegisterForm::RegMask_Size(); diff --git a/src/hotspot/share/opto/chaitin.cpp b/src/hotspot/share/opto/chaitin.cpp index 926a5176232..d650df45a8d 100644 --- a/src/hotspot/share/opto/chaitin.cpp +++ b/src/hotspot/share/opto/chaitin.cpp @@ -1577,8 +1577,8 @@ uint PhaseChaitin::Select( ) { // Re-insert into the IFG _ifg->re_insert(lidx); if( !lrg->alive() ) continue; - // capture allstackedness flag before mask is hacked - const int is_allstack = lrg->mask().is_AllStack(); + // capture the infinite-stack flag before the mask is hacked + const int is_infinite_stack = lrg->mask().is_infinite_stack(); // Yeah, yeah, yeah, I know, I know. I can refactor this // to avoid the GOTO, although the refactored code will not @@ -1629,7 +1629,7 @@ uint PhaseChaitin::Select( ) { } } } - //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness"); + //assert(is_infinite_stack == lrg->mask().is_infinite_stack(), "nbrs must not change the infinite-stack flag"); // Aligned pairs need aligned masks assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity"); if (lrg->num_regs() > 1 && !lrg->_fat_proj) { @@ -1640,9 +1640,9 @@ uint PhaseChaitin::Select( ) { OptoReg::Name reg = choose_color( *lrg, chunk ); //--------------- - // If we fail to color and the AllStack flag is set, trigger + // If we fail to color and the infinite flag is set, trigger // a chunk-rollover event - if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) { + if (!OptoReg::is_valid(OptoReg::add(reg, -chunk)) && is_infinite_stack) { // Bump register mask up to next stack chunk chunk += RegMask::CHUNK_SIZE; lrg->Set_All(); @@ -1651,7 +1651,7 @@ uint PhaseChaitin::Select( ) { //--------------- // Did we get a color? - else if( OptoReg::is_valid(reg)) { + else if (OptoReg::is_valid(reg)) { #ifndef PRODUCT RegMask avail_rm = lrg->mask(); #endif @@ -1708,7 +1708,7 @@ uint PhaseChaitin::Select( ) { assert( lrg->alive(), "" ); assert( !lrg->_fat_proj || lrg->is_multidef() || lrg->_def->outcnt() > 0, "fat_proj cannot spill"); - assert( !orig_mask.is_AllStack(), "All Stack does not spill" ); + assert( !orig_mask.is_infinite_stack(), "infinite stack does not spill" ); // Assign the special spillreg register lrg->set_reg(OptoReg::Name(spill_reg++)); diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp index ad172f672b4..8cef21c6cc2 100644 --- a/src/hotspot/share/opto/chaitin.hpp +++ b/src/hotspot/share/opto/chaitin.hpp @@ -48,7 +48,7 @@ class PhaseChaitin; // Live-RanGe structure. 
class LRG : public ResourceObj { public: - static const uint AllStack_size = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions + static const uint INFINITE_STACK_SIZE = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions enum { SPILL_REG=29999 }; // Register number of a spilled LRG double _cost; // 2 for loads/1 for stores times block freq @@ -83,14 +83,14 @@ public: void set_degree( uint degree ) { _eff_degree = degree; DEBUG_ONLY(_degree_valid = 1;) - assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers"); + assert(!_mask.is_infinite_stack() || (_mask.is_infinite_stack() && lo_degree()), "_eff_degree can't be bigger than INFINITE_STACK_SIZE - _num_regs if the mask supports stack registers"); } // Made a change that hammered degree void invalid_degree() { DEBUG_ONLY(_degree_valid=0;) } // Incrementally modify degree. If it was correct, it should remain correct void inc_degree( uint mod ) { _eff_degree += mod; - assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers"); + assert(!_mask.is_infinite_stack() || (_mask.is_infinite_stack() && lo_degree()), "_eff_degree can't be bigger than INFINITE_STACK_SIZE - _num_regs if the mask supports stack registers"); } // Compute the degree between 2 live ranges int compute_degree( LRG &l ) const; @@ -105,9 +105,9 @@ private: RegMask _mask; // Allowed registers for this LRG uint _mask_size; // cache of _mask.Size(); public: - int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); } + int compute_mask_size() const { return _mask.is_infinite_stack() ? INFINITE_STACK_SIZE : _mask.Size(); } void set_mask_size( int size ) { - assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), ""); + assert((size == (int)INFINITE_STACK_SIZE) || (size == (int)_mask.Size()), ""); _mask_size = size; #ifdef ASSERT _msize_valid=1; diff --git a/src/hotspot/share/opto/coalesce.cpp b/src/hotspot/share/opto/coalesce.cpp index 42e3f314927..588adbfcf42 100644 --- a/src/hotspot/share/opto/coalesce.cpp +++ b/src/hotspot/share/opto/coalesce.cpp @@ -581,7 +581,7 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, if( _ulr.insert(lidx) ) { // Infinite-stack neighbors do not alter colorability, as they // can always color to some other color. - if( !lrgs(lidx).mask().is_AllStack() ) { + if( !lrgs(lidx).mask().is_infinite_stack() ) { // If this coalesce will make any new neighbor uncolorable, // do not coalesce. 
if( lrgs(lidx).just_lo_degree() ) @@ -698,7 +698,7 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block // Number of bits free uint rm_size = rm.Size(); - if (UseFPUForSpilling && rm.is_AllStack() ) { + if (UseFPUForSpilling && rm.is_infinite_stack()) { // Don't coalesce when frequency difference is large Block *dst_b = _phc._cfg.get_block_for_node(dst_copy); Block *src_def_b = _phc._cfg.get_block_for_node(src_def); @@ -707,7 +707,9 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block } // If we can use any stack slot, then effective size is infinite - if( rm.is_AllStack() ) rm_size += 1000000; + if (rm.is_infinite_stack()) { + rm_size += 1000000; + } // Incompatible masks, no way to coalesce if( rm_size == 0 ) return false; diff --git a/src/hotspot/share/opto/ifg.cpp b/src/hotspot/share/opto/ifg.cpp index 0807f8ea5fe..a6bcb640c14 100644 --- a/src/hotspot/share/opto/ifg.cpp +++ b/src/hotspot/share/opto/ifg.cpp @@ -748,7 +748,7 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, OptoReg::Name r_reg = rm.find_first_elem(); if (interfering_lrg.mask().Member(r_reg)) { interfering_lrg.Remove(r_reg); - interfering_lrg.set_mask_size(interfering_lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1); + interfering_lrg.set_mask_size(interfering_lrg.mask().is_infinite_stack() ? LRG::INFINITE_STACK_SIZE : old_size - 1); } } diff --git a/src/hotspot/share/opto/indexSet.cpp b/src/hotspot/share/opto/indexSet.cpp index 367f5b78af2..cdce9fa95b1 100644 --- a/src/hotspot/share/opto/indexSet.cpp +++ b/src/hotspot/share/opto/indexSet.cpp @@ -178,7 +178,7 @@ uint IndexSet::lrg_union(uint lr1, uint lr2, LRG &lrg = ifg->lrgs(element); if (mask.overlap(lrg.mask())) { insert(element); - if (!lrg.mask().is_AllStack()) { + if (!lrg.mask().is_infinite_stack()) { reg_degree += lrg1.compute_degree(lrg); if (reg_degree >= fail_degree) return reg_degree; } else { @@ -198,7 +198,7 @@ uint IndexSet::lrg_union(uint lr1, uint lr2, LRG &lrg = ifg->lrgs(element); if (mask.overlap(lrg.mask())) { if (insert(element)) { - if (!lrg.mask().is_AllStack()) { + if (!lrg.mask().is_infinite_stack()) { reg_degree += lrg2.compute_degree(lrg); if (reg_degree >= fail_degree) return reg_degree; } else { diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp index 5cb56019bc1..2608ba0af0b 100644 --- a/src/hotspot/share/opto/matcher.cpp +++ b/src/hotspot/share/opto/matcher.cpp @@ -557,13 +557,13 @@ void Matcher::init_first_stack_mask() { C->FIRST_STACK_mask().Insert(i); } // Finally, set the "infinite stack" bit. - C->FIRST_STACK_mask().set_AllStack(); + C->FIRST_STACK_mask().set_infinite_stack(); // Make spill masks. Registers for their class, plus FIRST_STACK_mask. RegMask aligned_stack_mask = C->FIRST_STACK_mask(); // Keep spill masks aligned. 
aligned_stack_mask.clear_to_pairs(); - assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); RegMask scalable_stack_mask = aligned_stack_mask; *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP]; @@ -620,7 +620,7 @@ void Matcher::init_first_stack_mask() { in = OptoReg::add(in, -1); } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX); - assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX]; idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask); } else { @@ -635,7 +635,7 @@ void Matcher::init_first_stack_mask() { in = OptoReg::add(in, -1); } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY); - assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY]; idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask); } else { @@ -650,7 +650,7 @@ void Matcher::init_first_stack_mask() { in = OptoReg::add(in, -1); } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ); - assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ]; idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask); } else { @@ -670,7 +670,7 @@ void Matcher::init_first_stack_mask() { // For RegVectMask scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots()); - assert(scalable_stack_mask.is_AllStack(), "should be infinite stack"); + assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack"); *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask]; idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask); } @@ -684,7 +684,7 @@ void Matcher::init_first_stack_mask() { // For VecA scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA); - assert(scalable_stack_mask.is_AllStack(), "should be infinite stack"); + assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack"); *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA]; idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask); } else { @@ -998,7 +998,7 @@ void Matcher::init_spill_mask( Node *ret ) { for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) STACK_ONLY_mask.Insert(i); // Also set the "infinite stack" bit. - STACK_ONLY_mask.set_AllStack(); + STACK_ONLY_mask.set_infinite_stack(); for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) { // Copy the register names over into the shared world. diff --git a/src/hotspot/share/opto/postaloc.cpp b/src/hotspot/share/opto/postaloc.cpp index 7f4d2845792..23f16dbecfb 100644 --- a/src/hotspot/share/opto/postaloc.cpp +++ b/src/hotspot/share/opto/postaloc.cpp @@ -173,8 +173,8 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def)); OptoReg::Name def_reg = def_lrg.reg(); const RegMask &use_mask = n->in_RegMask(idx); - bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0) - : (use_mask.is_AllStack() != 0)); + bool can_use = (RegMask::can_represent(def_reg) ? 
(use_mask.Member(def_reg) != 0) + : (use_mask.is_infinite_stack() != 0)); if (!RegMask::is_vector(def->ideal_reg())) { // Check for a copy to or from a misaligned pair. // It is workaround for a sparc with misaligned pairs. diff --git a/src/hotspot/share/opto/reg_split.cpp b/src/hotspot/share/opto/reg_split.cpp index 42ae79feba7..72110d01267 100644 --- a/src/hotspot/share/opto/reg_split.cpp +++ b/src/hotspot/share/opto/reg_split.cpp @@ -520,7 +520,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Gather info on which LRG's are spilling, and build maps for (bidx = 1; bidx < maxlrg; bidx++) { if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) { - assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color"); + assert(!lrgs(bidx).mask().is_infinite_stack(), "Infinite stack mask should color"); lrg2reach[bidx] = spill_cnt; spill_cnt++; lidxs.append(bidx); @@ -1037,7 +1037,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Need special logic to handle bound USES. Insert a split at this // bound use if we can't rematerialize the def, or if we need the // split to form a misaligned pair. - if( !umask.is_AllStack() && + if (!umask.is_infinite_stack() && (int)umask.Size() <= lrgs(useidx).num_regs() && (!def->rematerialize() || (!is_vect && umask.is_misaligned_pair()))) { diff --git a/src/hotspot/share/opto/regmask.cpp b/src/hotspot/share/opto/regmask.cpp index 33f73262b16..20fcaf9ccd4 100644 --- a/src/hotspot/share/opto/regmask.cpp +++ b/src/hotspot/share/opto/regmask.cpp @@ -119,10 +119,10 @@ static const uintptr_t low_bits[5] = { fives, // 0x5555..55 void RegMask::clear_to_pairs() { assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; bits &= ((bits & fives) << 1U); // 1 hi-bit set for each pair bits |= (bits >> 1U); // Smear 1 hi-bit into a pair - _RM_UP[i] = bits; + _rm_word[i] = bits; } assert(is_aligned_pairs(), "mask is not aligned, adjacent pairs"); } @@ -135,7 +135,7 @@ bool RegMask::is_aligned_pairs() const { // Assert that the register mask contains only bit pairs. assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; while (bits) { // Check bits for pairing uintptr_t bit = uintptr_t(1) << find_lowest_bit(bits); // Extract low bit // Low bit is not odd means its mis-aligned. @@ -151,10 +151,12 @@ bool RegMask::is_aligned_pairs() const { // Return TRUE if the mask contains a single bit bool RegMask::is_bound1() const { - if (is_AllStack()) return false; + if (is_infinite_stack()) { + return false; + } for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t v = _RM_UP[i]; + uintptr_t v = _rm_word[i]; if (v != 0) { // Only one bit allowed -> v must be a power of two if (!is_power_of_2(v)) { @@ -163,7 +165,7 @@ bool RegMask::is_bound1() const { // A single bit was found - check there are no bits in the rest of the mask for (i++; i <= _hwm; i++) { - if (_RM_UP[i] != 0) { + if (_rm_word[i] != 0) { return false; } } @@ -177,28 +179,30 @@ bool RegMask::is_bound1() const { // Return TRUE if the mask contains an adjacent pair of bits and no other bits. 
bool RegMask::is_bound_pair() const { - if (is_AllStack()) return false; + if (is_infinite_stack()) { + return false; + } assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - if (_RM_UP[i] != 0) { // Found some bits - unsigned int bit_index = find_lowest_bit(_RM_UP[i]); - if (bit_index != _WordBitMask) { // Bit pair stays in same word? + if (_rm_word[i] != 0) { // Found some bits + unsigned int bit_index = find_lowest_bit(_rm_word[i]); + if (bit_index != WORD_BIT_MASK) { // Bit pair stays in same word? uintptr_t bit = uintptr_t(1) << bit_index; // Extract lowest bit from mask - if ((bit | (bit << 1U)) != _RM_UP[i]) { + if ((bit | (bit << 1U)) != _rm_word[i]) { return false; // Require adjacent bit pair and no more bits } } else { // Else its a split-pair case - assert(is_power_of_2(_RM_UP[i]), "invariant"); + assert(is_power_of_2(_rm_word[i]), "invariant"); i++; // Skip iteration forward - if (i > _hwm || _RM_UP[i] != 1) { + if (i > _hwm || _rm_word[i] != 1) { return false; // Require 1 lo bit in next word } } // A matching pair was found - check there are no bits in the rest of the mask for (i++; i <= _hwm; i++) { - if (_RM_UP[i] != 0) { + if (_rm_word[i] != 0) { return false; } } @@ -244,9 +248,9 @@ OptoReg::Name RegMask::find_first_set(LRG &lrg, const int size) const { } assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - if (_RM_UP[i]) { // Found some bits + if (_rm_word[i]) { // Found some bits // Convert to bit number, return hi bit in pair - return OptoReg::Name((i<<_LogWordBits) + find_lowest_bit(_RM_UP[i]) + (size - 1)); + return OptoReg::Name((i << LogBitsPerWord) + find_lowest_bit(_rm_word[i]) + (size - 1)); } } return OptoReg::Bad; @@ -260,7 +264,7 @@ void RegMask::clear_to_sets(const unsigned int size) { assert(valid_watermarks(), "sanity"); uintptr_t low_bits_mask = low_bits[size >> 2U]; for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; uintptr_t sets = (bits & low_bits_mask); for (unsigned j = 1U; j < size; j++) { sets = (bits & (sets << 1U)); // filter bits which produce whole sets @@ -275,7 +279,7 @@ void RegMask::clear_to_sets(const unsigned int size) { } } } - _RM_UP[i] = sets; + _rm_word[i] = sets; } assert(is_aligned_sets(size), "mask is not aligned, adjacent sets"); } @@ -288,7 +292,7 @@ void RegMask::smear_to_sets(const unsigned int size) { assert(valid_watermarks(), "sanity"); uintptr_t low_bits_mask = low_bits[size >> 2U]; for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; uintptr_t sets = 0; for (unsigned j = 0; j < size; j++) { sets |= (bits & low_bits_mask); // collect partial bits @@ -304,7 +308,7 @@ void RegMask::smear_to_sets(const unsigned int size) { } } } - _RM_UP[i] = sets; + _rm_word[i] = sets; } assert(is_aligned_sets(size), "mask is not aligned, adjacent sets"); } @@ -317,7 +321,7 @@ bool RegMask::is_aligned_sets(const unsigned int size) const { uintptr_t low_bits_mask = low_bits[size >> 2U]; assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; while (bits) { // Check bits for pairing uintptr_t bit = uintptr_t(1) << find_lowest_bit(bits); // Low bit is not odd means its mis-aligned. @@ -340,35 +344,37 @@ bool RegMask::is_aligned_sets(const unsigned int size) const { // Return TRUE if the mask contains one adjacent set of bits and no other bits. // Works also for size 1. 
bool RegMask::is_bound_set(const unsigned int size) const { - if (is_AllStack()) return false; + if (is_infinite_stack()) { + return false; + } assert(1 <= size && size <= 16, "update low bits table"); assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - if (_RM_UP[i] != 0) { // Found some bits - unsigned bit_index = find_lowest_bit(_RM_UP[i]); + if (_rm_word[i] != 0) { // Found some bits + unsigned bit_index = find_lowest_bit(_rm_word[i]); uintptr_t bit = uintptr_t(1) << bit_index; if (bit_index + size <= BitsPerWord) { // Bit set stays in same word? uintptr_t hi_bit = bit << (size - 1); uintptr_t set = hi_bit + ((hi_bit-1) & ~(bit-1)); - if (set != _RM_UP[i]) { + if (set != _rm_word[i]) { return false; // Require adjacent bit set and no more bits } } else { // Else its a split-set case // All bits from bit to highest bit in the word must be set - if ((all & ~(bit-1)) != _RM_UP[i]) { + if ((all & ~(bit - 1)) != _rm_word[i]) { return false; } i++; // Skip iteration forward and check high part // The lower bits should be 1 since it is split case. uintptr_t set = (bit >> (BitsPerWord - size)) - 1; - if (i > _hwm || _RM_UP[i] != set) { + if (i > _hwm || _rm_word[i] != set) { return false; // Require expected low bits in next word } } // A matching set found - check there are no bits in the rest of the mask for (i++; i <= _hwm; i++) { - if (_RM_UP[i] != 0) { + if (_rm_word[i] != 0) { return false; } } @@ -383,8 +389,9 @@ bool RegMask::is_bound_set(const unsigned int size) const { // UP means register only, Register plus stack, or stack only is DOWN bool RegMask::is_UP() const { // Quick common case check for DOWN (any stack slot is legal) - if (is_AllStack()) + if (is_infinite_stack()) { return false; + } // Slower check for any stack bits set (also DOWN) if (overlap(Matcher::STACK_ONLY_mask)) return false; @@ -397,7 +404,7 @@ uint RegMask::Size() const { uint sum = 0; assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - sum += population_count(_RM_UP[i]); + sum += population_count(_rm_word[i]); } return sum; } @@ -445,7 +452,9 @@ void RegMask::dump(outputStream *st) const { st->print("-"); OptoReg::dump(last, st); } - if (is_AllStack()) st->print("..."); + if (is_infinite_stack()) { + st->print("..."); + } } st->print("]"); } diff --git a/src/hotspot/share/opto/regmask.hpp b/src/hotspot/share/opto/regmask.hpp index f77c14e50ea..fa1721bd45a 100644 --- a/src/hotspot/share/opto/regmask.hpp +++ b/src/hotspot/share/opto/regmask.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,8 +48,8 @@ static unsigned int find_highest_bit(uintptr_t mask) { // as any notion of register classes. We provide a register mask, which is // just a collection of Register numbers. -// The ADLC defines 2 macros, RM_SIZE and FORALL_BODY. -// RM_SIZE is the size of a register mask in 32-bit words. +// The ADLC defines 2 macros, RM_SIZE_IN_INTS and FORALL_BODY. +// RM_SIZE_IN_INTS is the size of a register mask in 32-bit words. // FORALL_BODY replicates a BODY macro once per word in the register mask. // The usage is somewhat clumsy and limited to the regmask.[h,c]pp files. 
// However, it means the ADLC can redefine the unroll macro and all loops @@ -59,13 +59,13 @@ class RegMask { friend class RegMaskIterator; - // The RM_SIZE is aligned to 64-bit - assert that this holds - LP64_ONLY(STATIC_ASSERT(is_aligned(RM_SIZE, 2))); + // RM_SIZE_IN_INTS is aligned to 64-bit - assert that this holds + LP64_ONLY(STATIC_ASSERT(is_aligned(RM_SIZE_IN_INTS, 2))); - static const unsigned int _WordBitMask = BitsPerWord - 1U; - static const unsigned int _LogWordBits = LogBitsPerWord; - static const unsigned int _RM_SIZE = LP64_ONLY(RM_SIZE >> 1) NOT_LP64(RM_SIZE); - static const unsigned int _RM_MAX = _RM_SIZE - 1U; + static const unsigned int WORD_BIT_MASK = BitsPerWord - 1U; + static const unsigned int RM_SIZE_IN_WORDS = + LP64_ONLY(RM_SIZE_IN_INTS >> 1) NOT_LP64(RM_SIZE_IN_INTS); + static const unsigned int RM_WORD_MAX_INDEX = RM_SIZE_IN_WORDS - 1U; union { // Array of Register Mask bits. This array is large enough to cover @@ -73,8 +73,8 @@ class RegMask { // on the stack (stack registers) up to some interesting limit. Methods // that need more parameters will NOT be compiled. On Intel, the limit // is something like 90+ parameters. - int _RM_I[RM_SIZE]; - uintptr_t _RM_UP[_RM_SIZE]; + int _rm_int[RM_SIZE_IN_INTS]; + uintptr_t _rm_word[RM_SIZE_IN_WORDS]; }; // The low and high water marks represents the lowest and highest word @@ -85,7 +85,7 @@ class RegMask { unsigned int _hwm; public: - enum { CHUNK_SIZE = _RM_SIZE * BitsPerWord }; + enum { CHUNK_SIZE = RM_SIZE_IN_WORDS * BitsPerWord }; // SlotsPerLong is 2, since slots are 32 bits and longs are 64 bits. // Also, consider the maximum alignment size for a normally allocated @@ -117,17 +117,21 @@ class RegMask { # undef BODY int dummy = 0) { #if defined(VM_LITTLE_ENDIAN) || !defined(_LP64) -# define BODY(I) _RM_I[I] = a##I; +# define BODY(I) _rm_int[I] = a##I; #else // We need to swap ints. -# define BODY(I) _RM_I[I ^ 1] = a##I; +# define BODY(I) _rm_int[I ^ 1] = a##I; #endif FORALL_BODY # undef BODY _lwm = 0; - _hwm = _RM_MAX; - while (_hwm > 0 && _RM_UP[_hwm] == 0) _hwm--; - while ((_lwm < _hwm) && _RM_UP[_lwm] == 0) _lwm++; + _hwm = RM_WORD_MAX_INDEX; + while (_hwm > 0 && _rm_word[_hwm] == 0) { + _hwm--; + } + while ((_lwm < _hwm) && _rm_word[_lwm] == 0) { + _lwm++; + } assert(valid_watermarks(), "post-condition"); } @@ -135,14 +139,14 @@ class RegMask { RegMask(RegMask *rm) { _hwm = rm->_hwm; _lwm = rm->_lwm; - for (unsigned i = 0; i < _RM_SIZE; i++) { - _RM_UP[i] = rm->_RM_UP[i]; + for (unsigned i = 0; i < RM_SIZE_IN_WORDS; i++) { + _rm_word[i] = rm->_rm_word[i]; } assert(valid_watermarks(), "post-condition"); } // Construct an empty mask - RegMask() : _RM_UP(), _lwm(_RM_MAX), _hwm(0) { + RegMask() : _rm_word(), _lwm(RM_WORD_MAX_INDEX), _hwm(0) { assert(valid_watermarks(), "post-condition"); } @@ -156,18 +160,18 @@ class RegMask { assert(reg < CHUNK_SIZE, ""); unsigned r = (unsigned)reg; - return _RM_UP[r >> _LogWordBits] & (uintptr_t(1) << (r & _WordBitMask)); + return _rm_word[r >> LogBitsPerWord] & (uintptr_t(1) << (r & WORD_BIT_MASK)); } // The last bit in the register mask indicates that the mask should repeat // indefinitely with ONE bits. Returns TRUE if mask is infinite or // unbounded in size. Returns FALSE if mask is finite size. 
- bool is_AllStack() const { - return (_RM_UP[_RM_MAX] & (uintptr_t(1) << _WordBitMask)) != 0; + bool is_infinite_stack() const { + return (_rm_word[RM_WORD_MAX_INDEX] & (uintptr_t(1) << WORD_BIT_MASK)) != 0; } - void set_AllStack() { - _RM_UP[_RM_MAX] |= (uintptr_t(1) << _WordBitMask); + void set_infinite_stack() { + _rm_word[RM_WORD_MAX_INDEX] |= (uintptr_t(1) << WORD_BIT_MASK); } // Test for being a not-empty mask. @@ -175,7 +179,7 @@ class RegMask { assert(valid_watermarks(), "sanity"); uintptr_t tmp = 0; for (unsigned i = _lwm; i <= _hwm; i++) { - tmp |= _RM_UP[i]; + tmp |= _rm_word[i]; } return tmp; } @@ -184,9 +188,9 @@ class RegMask { OptoReg::Name find_first_elem() const { assert(valid_watermarks(), "sanity"); for (unsigned i = _lwm; i <= _hwm; i++) { - uintptr_t bits = _RM_UP[i]; + uintptr_t bits = _rm_word[i]; if (bits) { - return OptoReg::Name((i << _LogWordBits) + find_lowest_bit(bits)); + return OptoReg::Name((i << LogBitsPerWord) + find_lowest_bit(bits)); } } return OptoReg::Name(OptoReg::Bad); @@ -198,9 +202,9 @@ class RegMask { // Careful not to overflow if _lwm == 0 unsigned i = _hwm + 1; while (i > _lwm) { - uintptr_t bits = _RM_UP[--i]; + uintptr_t bits = _rm_word[--i]; if (bits) { - return OptoReg::Name((i << _LogWordBits) + find_highest_bit(bits)); + return OptoReg::Name((i << LogBitsPerWord) + find_highest_bit(bits)); } } return OptoReg::Name(OptoReg::Bad); @@ -213,13 +217,13 @@ class RegMask { // Verify watermarks are sane, i.e., within bounds and that no // register words below or above the watermarks have bits set. bool valid_watermarks() const { - assert(_hwm < _RM_SIZE, "_hwm out of range: %d", _hwm); - assert(_lwm < _RM_SIZE, "_lwm out of range: %d", _lwm); + assert(_hwm < RM_SIZE_IN_WORDS, "_hwm out of range: %d", _hwm); + assert(_lwm < RM_SIZE_IN_WORDS, "_lwm out of range: %d", _lwm); for (unsigned i = 0; i < _lwm; i++) { - assert(_RM_UP[i] == 0, "_lwm too high: %d regs at: %d", _lwm, i); + assert(_rm_word[i] == 0, "_lwm too high: %d regs at: %d", _lwm, i); } - for (unsigned i = _hwm + 1; i < _RM_SIZE; i++) { - assert(_RM_UP[i] == 0, "_hwm too low: %d regs at: %d", _hwm, i); + for (unsigned i = _hwm + 1; i < RM_SIZE_IN_WORDS; i++) { + assert(_rm_word[i] == 0, "_hwm too low: %d regs at: %d", _hwm, i); } return true; } @@ -267,7 +271,7 @@ class RegMask { unsigned lwm = MAX2(_lwm, rm._lwm); uintptr_t result = 0; for (unsigned i = lwm; i <= hwm; i++) { - result |= _RM_UP[i] & rm._RM_UP[i]; + result |= _rm_word[i] & rm._rm_word[i]; } return result; } @@ -278,17 +282,17 @@ class RegMask { // Clear a register mask void Clear() { - _lwm = _RM_MAX; + _lwm = RM_WORD_MAX_INDEX; _hwm = 0; - memset(_RM_UP, 0, sizeof(uintptr_t) * _RM_SIZE); + memset(_rm_word, 0, sizeof(uintptr_t) * RM_SIZE_IN_WORDS); assert(valid_watermarks(), "sanity"); } // Fill a register mask with 1's void Set_All() { _lwm = 0; - _hwm = _RM_MAX; - memset(_RM_UP, 0xFF, sizeof(uintptr_t) * _RM_SIZE); + _hwm = RM_WORD_MAX_INDEX; + memset(_rm_word, 0xFF, sizeof(uintptr_t) * RM_SIZE_IN_WORDS); assert(valid_watermarks(), "sanity"); } @@ -299,10 +303,10 @@ class RegMask { assert(reg < CHUNK_SIZE, "sanity"); assert(valid_watermarks(), "pre-condition"); unsigned r = (unsigned)reg; - unsigned index = r >> _LogWordBits; + unsigned index = r >> LogBitsPerWord; if (index > _hwm) _hwm = index; if (index < _lwm) _lwm = index; - _RM_UP[index] |= (uintptr_t(1) << (r & _WordBitMask)); + _rm_word[index] |= (uintptr_t(1) << (r & WORD_BIT_MASK)); assert(valid_watermarks(), "post-condition"); } @@ -310,7 +314,7 @@ class 
RegMask { void Remove(OptoReg::Name reg) { assert(reg < CHUNK_SIZE, ""); unsigned r = (unsigned)reg; - _RM_UP[r >> _LogWordBits] &= ~(uintptr_t(1) << (r & _WordBitMask)); + _rm_word[r >> LogBitsPerWord] &= ~(uintptr_t(1) << (r & WORD_BIT_MASK)); } // OR 'rm' into 'this' @@ -320,7 +324,7 @@ class RegMask { if (_lwm > rm._lwm) _lwm = rm._lwm; if (_hwm < rm._hwm) _hwm = rm._hwm; for (unsigned i = _lwm; i <= _hwm; i++) { - _RM_UP[i] |= rm._RM_UP[i]; + _rm_word[i] |= rm._rm_word[i]; } assert(valid_watermarks(), "sanity"); } @@ -331,7 +335,7 @@ class RegMask { // Do not evaluate words outside the current watermark range, as they are // already zero and an &= would not change that for (unsigned i = _lwm; i <= _hwm; i++) { - _RM_UP[i] &= rm._RM_UP[i]; + _rm_word[i] &= rm._rm_word[i]; } // Narrow the watermarks if &rm spans a narrower range. // Update after to ensure non-overlapping words are zeroed out. @@ -345,7 +349,7 @@ class RegMask { unsigned hwm = MIN2(_hwm, rm._hwm); unsigned lwm = MAX2(_lwm, rm._lwm); for (unsigned i = lwm; i <= hwm; i++) { - _RM_UP[i] &= ~rm._RM_UP[i]; + _rm_word[i] &= ~rm._rm_word[i]; } } @@ -415,7 +419,7 @@ class RegMaskIterator { // Find the next word with bits while (_next_index <= _rm._hwm) { - _current_bits = _rm._RM_UP[_next_index++]; + _current_bits = _rm._rm_word[_next_index++]; if (_current_bits != 0) { // Found a word. Calculate the first register element and // prepare _current_bits by shifting it down and clearing @@ -423,7 +427,7 @@ class RegMaskIterator { unsigned int next_bit = find_lowest_bit(_current_bits); assert(((_current_bits >> next_bit) & 0x1) == 1, "lowest bit must be set after shift"); _current_bits = (_current_bits >> next_bit) - 1; - _reg = OptoReg::Name(((_next_index - 1) << RegMask::_LogWordBits) + next_bit); + _reg = OptoReg::Name(((_next_index - 1) << LogBitsPerWord) + next_bit); return r; } } @@ -435,6 +439,6 @@ class RegMaskIterator { }; // Do not use this constant directly in client code! 
-#undef RM_SIZE +#undef RM_SIZE_IN_INTS #endif // SHARE_OPTO_REGMASK_HPP diff --git a/test/hotspot/gtest/opto/test_regmask.cpp b/test/hotspot/gtest/opto/test_regmask.cpp index 0fbbf511863..81fee5089c4 100644 --- a/test/hotspot/gtest/opto/test_regmask.cpp +++ b/test/hotspot/gtest/opto/test_regmask.cpp @@ -35,7 +35,7 @@ static void contains_expected_num_of_registers(const RegMask& rm, unsigned int e ASSERT_TRUE(rm.is_NotEmpty()); } else { ASSERT_TRUE(!rm.is_NotEmpty()); - ASSERT_TRUE(!rm.is_AllStack()); + ASSERT_TRUE(!rm.is_infinite_stack()); } RegMaskIterator rmi(rm); @@ -84,8 +84,8 @@ TEST_VM(RegMask, Set_ALL) { rm.Set_All(); ASSERT_TRUE(rm.Size() == RegMask::CHUNK_SIZE); ASSERT_TRUE(rm.is_NotEmpty()); - // Set_All sets AllStack bit - ASSERT_TRUE(rm.is_AllStack()); + // Set_All sets the infinite bit + ASSERT_TRUE(rm.is_infinite_stack()); contains_expected_num_of_registers(rm, RegMask::CHUNK_SIZE); } @@ -135,7 +135,7 @@ TEST_VM(RegMask, SUBTRACT) { for (int i = 17; i < RegMask::CHUNK_SIZE; i++) { rm1.Insert(i); } - ASSERT_TRUE(rm1.is_AllStack()); + ASSERT_TRUE(rm1.is_infinite_stack()); rm2.SUBTRACT(rm1); contains_expected_num_of_registers(rm1, RegMask::CHUNK_SIZE - 17); contains_expected_num_of_registers(rm2, 17); @@ -151,8 +151,8 @@ TEST_VM(RegMask, is_bound1) { contains_expected_num_of_registers(rm, 1); rm.Remove(i); } - // AllStack bit does not count as a bound register - rm.set_AllStack(); + // The infinite bit does not count as a bound register + rm.set_infinite_stack(); ASSERT_FALSE(rm.is_bound1()); } @@ -168,7 +168,7 @@ TEST_VM(RegMask, is_bound_pair) { contains_expected_num_of_registers(rm, 2); rm.Clear(); } - // A pair with the AllStack bit does not count as a bound pair + // A pair with the infinite bit does not count as a bound pair rm.Clear(); rm.Insert(RegMask::CHUNK_SIZE - 2); rm.Insert(RegMask::CHUNK_SIZE - 1); @@ -187,11 +187,11 @@ TEST_VM(RegMask, is_bound_set) { contains_expected_num_of_registers(rm, size); rm.Clear(); } - // A set with the AllStack bit does not count as a bound set + // A set with the infinite bit does not count as a bound set for (int j = RegMask::CHUNK_SIZE - size; j < RegMask::CHUNK_SIZE; j++) { rm.Insert(j); } ASSERT_FALSE(rm.is_bound_set(size)); rm.Clear(); } -} \ No newline at end of file +}
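
Note (not part of the patch): the RM_SIZE to RM_SIZE_IN_INTS rename also flows into the ADLC-generated unroll macros described at the top of regmask.hpp. As a rough illustration of what the generated header looks like and how the renamed macro is consumed, here is a sketch assuming a hypothetical RM_SIZE_IN_INTS of 4; the exact output is produced by ArchDesc::buildMachRegisterNumbers() and may differ in detail.

// Sketch of the ADLC output consumed by regmask.hpp (hypothetical size 4).
#define RM_SIZE_IN_INTS 4
#define FORALL_BODY BODY(0) BODY(1) BODY(2) BODY(3)

// regmask.hpp instantiates the unroll once per 32-bit word, as in the
// RegMask(int, int, ...) constructor touched by this patch:
//   #define BODY(I) _rm_int[I] = a##I;
//   FORALL_BODY
//   #undef BODY
// which expands to:
//   _rm_int[0] = a0; _rm_int[1] = a1; _rm_int[2] = a2; _rm_int[3] = a3;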
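
Note (illustrative, not part of the patch): the pair-clearing bit trick in RegMask::clear_to_pairs() is easy to check in isolation. The two shifted-mask lines below mirror regmask.cpp; the standalone driver and the spelled-out 64-bit `fives` constant are assumptions made only to keep the demo compilable on its own.

#include <cassert>
#include <cstdint>

int main() {
  // 0x5555... marks the low (even) slot of every register pair.
  const uint64_t fives = 0x5555555555555555ULL;

  // Bits {2,3} form a complete pair; bit 0 is an unpaired stray.
  uint64_t bits = 0b1101;

  bits &= ((bits & fives) << 1U); // 1 hi-bit set for each pair   -> 0b1000
  bits |= (bits >> 1U);           // Smear 1 hi-bit into a pair   -> 0b1100

  assert(bits == 0b1100); // stray bit 0 cleared, pair {2,3} preserved
  return 0;
}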
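
Note (illustrative, not part of the patch): a minimal gtest-style sketch of the renamed infinite-stack queries, written in the style of the existing cases in test/hotspot/gtest/opto/test_regmask.cpp and assuming the same includes and helpers as that file. The test name is hypothetical. CHUNK_SIZE - 1 is exactly the bit that is_infinite_stack() tests, so inserting it keeps the watermarks valid; calling set_infinite_stack() on a freshly cleared mask would set the bit without updating them.

TEST_VM(RegMask, infinite_stack_bit) {
  RegMask rm;
  ASSERT_FALSE(rm.is_infinite_stack());

  // The last bit of the mask doubles as the infinite-stack marker.
  rm.Insert(RegMask::CHUNK_SIZE - 1);
  ASSERT_TRUE(rm.is_infinite_stack());
  contains_expected_num_of_registers(rm, 1);

  // Clear() resets the infinite-stack bit along with all register bits.
  rm.Clear();
  ASSERT_FALSE(rm.is_infinite_stack());

  // Set_All() turns on every bit, including the infinite-stack bit,
  // mirroring the existing Set_ALL test.
  rm.Set_All();
  ASSERT_TRUE(rm.is_infinite_stack());
}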
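
Note (illustrative, not part of the patch): the renamed internals (_rm_word, WORD_BIT_MASK, RM_SIZE_IN_WORDS) do not change how client code walks a mask. A small helper in the spirit of the gtest helper above, counting members with RegMaskIterator; the function itself is hypothetical, while has_next()/next()/Member() are the existing RegMaskIterator/RegMask API.

// Count the registers in a mask by iteration. For an infinite-stack mask this
// still only visits the bits that are physically set; the trailing "..." that
// RegMask::dump() prints corresponds to the infinite-stack bit itself.
static unsigned count_members(const RegMask& rm) {
  unsigned count = 0;
  RegMaskIterator rmi(rm);
  while (rmi.has_next()) {
    OptoReg::Name reg = rmi.next();
    assert(rm.Member(reg), "iterator only returns members of the mask");
    count++;
  }
  return count;
}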