8373428: Refine variables with the same name in nested scopes in PhaseChaitin::gather_lrg_masks

Reviewed-by: phh
Author: Hamlin Li
Date:   2025-12-15 09:10:51 +00:00
Commit: 3559eeca0e
Parent: ad6611a9a3


@@ -1076,8 +1076,8 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
       // Prepare register mask for each input
       for( uint k = input_edge_start; k < cnt; k++ ) {
-        uint vreg = _lrg_map.live_range_id(n->in(k));
-        if (!vreg) {
+        uint vreg_in = _lrg_map.live_range_id(n->in(k));
+        if (!vreg_in) {
           continue;
         }
@@ -1099,7 +1099,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
           if (k >= cur_node->num_opnds()) continue;
         }
-        LRG &lrg = lrgs(vreg);
+        LRG &lrg_in = lrgs(vreg_in);
         // // Testing for floating point code shape
         // Node *test = n->in(k);
         // if( test->is_Mach() ) {
@@ -1114,25 +1114,25 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
         // Do not limit registers from uncommon uses before
         // AggressiveCoalesce. This effectively pre-virtual-splits
         // around uncommon uses of common defs.
-        const RegMask &rm = n->in_RegMask(k);
+        const RegMask &rm_in = n->in_RegMask(k);
         if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
           // Since we are BEFORE aggressive coalesce, leave the register
           // mask untrimmed by the call. This encourages more coalescing.
           // Later, AFTER aggressive, this live range will have to spill
           // but the spiller handles slow-path calls very nicely.
         } else {
-          lrg.and_with(rm);
+          lrg_in.and_with(rm_in);
         }
         // Check for bound register masks
-        const RegMask &lrgmask = lrg.mask();
+        const RegMask &lrgmask_in = lrg_in.mask();
         uint kreg = n->in(k)->ideal_reg();
         bool is_vect = RegMask::is_vector(kreg);
         assert(n->in(k)->bottom_type()->isa_vect() == nullptr || is_vect ||
                kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask,
                "vector must be in vector registers");
-        if (lrgmask.is_bound(kreg))
-          lrg._is_bound = 1;
+        if (lrgmask_in.is_bound(kreg))
+          lrg_in._is_bound = 1;
         // If this use of a double forces a mis-aligned double,
         // flag as '_fat_proj' - really flag as allowing misalignment
@@ -1141,30 +1141,30 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
         // FOUR registers!
 #ifdef ASSERT
         if (is_vect && !_scheduling_info_generated) {
-          if (lrg.num_regs() != 0) {
-            assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
-            assert(!lrg._fat_proj, "sanity");
-            assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
+          if (lrg_in.num_regs() != 0) {
+            assert(lrgmask_in.is_aligned_sets(lrg_in.num_regs()), "vector should be aligned");
+            assert(!lrg_in._fat_proj, "sanity");
+            assert(RegMask::num_registers(kreg) == lrg_in.num_regs(), "sanity");
           } else {
             assert(n->is_Phi(), "not all inputs processed only if Phi");
           }
         }
 #endif
-        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
-          lrg._fat_proj = 1;
-          lrg._is_bound = 1;
+        if (!is_vect && lrg_in.num_regs() == 2 && !lrg_in._fat_proj && rm_in.is_misaligned_pair()) {
+          lrg_in._fat_proj = 1;
+          lrg_in._is_bound = 1;
         }
         // if the LRG is an unaligned pair, we will have to spill
         // so clear the LRG's register mask if it is not already spilled
         if (!is_vect && !n->is_SpillCopy() &&
-            (lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
-            lrgmask.is_misaligned_pair()) {
-          lrg.clear();
+            (lrg_in._def == nullptr || lrg_in.is_multidef() || !lrg_in._def->is_SpillCopy()) &&
+            lrgmask_in.is_misaligned_pair()) {
+          lrg_in.clear();
         }
         // Check for maximum frequency value
-        if (lrg._maxfreq < block->_freq) {
-          lrg._maxfreq = block->_freq;
+        if (lrg_in._maxfreq < block->_freq) {
+          lrg_in._maxfreq = block->_freq;
         }
       } // End for all allocated inputs
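
For context: the hunks above cover only the per-input loop, where vreg, lrg, rm and lrgmask gain an _in suffix. Earlier in the same loop body the original names are also used for the current node's own def, so the old inner declarations shadowed the outer variables. The standalone sketch below is not HotSpot code; the LiveRange type and every name in it are hypothetical, chosen only to mirror the before/after naming pattern.

// Minimal standalone sketch of the renaming pattern, assuming a made-up
// LiveRange type; illustrative only, not HotSpot code.
#include <cstdio>
#include <vector>

struct LiveRange {   // hypothetical stand-in for C2's LRG
  unsigned num_regs;
  double   maxfreq;
};

int main() {
  std::vector<LiveRange> lrgs = { {1, 0.0}, {2, 0.0} };

  // Outer scope: live range of the current node's def.
  unsigned vreg = 0;
  LiveRange& lrg = lrgs[vreg];

  // Inner scope: before the change the input's live range reused the
  // names vreg/lrg, shadowing the variables above; with the _in suffix
  // both live ranges stay visible and each update is unambiguous.
  for (unsigned k = 1; k < lrgs.size(); k++) {
    unsigned vreg_in = k;
    LiveRange& lrg_in = lrgs[vreg_in];
    if (lrg_in.maxfreq < 1.0) {
      lrg_in.maxfreq = 1.0;   // clearly touches the input, not the def
    }
  }

  std::printf("def regs=%u  input maxfreq=%.1f\n", lrg.num_regs, lrgs[1].maxfreq);
  return 0;
}

The rename itself is purely cosmetic: every removed line in the diff reappears unchanged except for the _in names, so behavior is identical and only readability improves.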