8382880: C2: Remove unused Final_Reshape_Counts::get_*_count() methods

Reviewed-by: kvn, aseoane
This commit is contained in:
jonghoonpark 2026-05-08 22:58:33 +00:00 committed by Vladimir Kozlov
parent 83cd98426e
commit 3c1af6b9c8

View File

@@ -3157,30 +3157,20 @@ void Compile::Code_Gen() {
}
//------------------------------Final_Reshape_Counts---------------------------
// This class defines counters to help identify when a method
// may/must be executed using hardware with only 24-bit precision.
// This class defines counters and node lists collected during
// the final graph reshaping.
struct Final_Reshape_Counts : public StackObj {
int _call_count; // count non-inlined 'common' calls
int _float_count; // count float ops requiring 24-bit precision
int _double_count; // count double ops requiring more precision
int _java_call_count; // count non-inlined 'java' calls
int _inner_loop_count; // count loops which need alignment
VectorSet _visited; // Visitation flags
Node_List _tests; // Set of IfNodes & PCTableNodes
Final_Reshape_Counts() :
_call_count(0), _float_count(0), _double_count(0),
_java_call_count(0), _inner_loop_count(0) { }
void inc_call_count () { _call_count ++; }
void inc_float_count () { _float_count ++; }
void inc_double_count() { _double_count++; }
void inc_java_call_count() { _java_call_count++; }
void inc_inner_loop_count() { _inner_loop_count++; }
int get_call_count () const { return _call_count ; }
int get_float_count () const { return _float_count ; }
int get_double_count() const { return _double_count; }
int get_java_call_count() const { return _java_call_count; }
int get_inner_loop_count() const { return _inner_loop_count; }
};
@@ -3243,7 +3233,6 @@ void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Uni
"unused CallLeafPureNode should have been removed before final graph reshaping");
}
#endif
// Count FPU ops and common calls, implements item (3)
bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop, dead_nodes);
if (!gc_handled) {
final_graph_reshaping_main_switch(n, frc, nop, dead_nodes);
@@ -3288,50 +3277,6 @@ void Compile::handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned) {
void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes) {
switch( nop ) {
// Count all float operations that may use FPU
case Op_AddHF:
case Op_MulHF:
case Op_AddF:
case Op_SubF:
case Op_MulF:
case Op_DivF:
case Op_NegF:
case Op_ModF:
case Op_ConvI2F:
case Op_ConF:
case Op_CmpF:
case Op_CmpF3:
case Op_StoreF:
case Op_LoadF:
// case Op_ConvL2F: // longs are split into 32-bit halves
frc.inc_float_count();
break;
case Op_ConvF2D:
case Op_ConvD2F:
frc.inc_float_count();
frc.inc_double_count();
break;
// Count all double operations that may use FPU
case Op_AddD:
case Op_SubD:
case Op_MulD:
case Op_DivD:
case Op_NegD:
case Op_ModD:
case Op_ConvI2D:
case Op_ConvD2I:
// case Op_ConvL2D: // handled by leaf call
// case Op_ConvD2L: // handled by leaf call
case Op_ConD:
case Op_CmpD:
case Op_CmpD3:
case Op_StoreD:
case Op_LoadD:
case Op_LoadD_unaligned:
frc.inc_double_count();
break;
case Op_Opaque1: // Remove Opaque Nodes before matching
n->subsume_by(n->in(1), this);
break;
@@ -3351,7 +3296,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
n->subsume_by(new_call, this);
}
frc.inc_call_count();
break;
}
case Op_CallStaticJava:
@@ -3364,13 +3308,8 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
case Op_CallLeafNoFP: {
assert (n->is_Call(), "");
CallNode *call = n->as_Call();
// Count call sites where the FP mode bit would have to be flipped.
// Do not count uncommon runtime calls:
// uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
// _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
frc.inc_call_count(); // Count the call site
} else { // See if uncommon argument is shared
// See if uncommon argument is shared
if (call->is_CallStaticJava() && call->as_CallStaticJava()->_name) {
Node *n = call->in(TypeFunc::Parms);
int nop = n->Opcode();
// Clone shared simple arguments to uncommon calls, item (1).
@@ -3388,6 +3327,13 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
// Mem nodes need explicit cases to satisfy assert(!n->is_Mem()) in default.
case Op_StoreF:
case Op_LoadF:
case Op_StoreD:
case Op_LoadD:
case Op_LoadD_unaligned:
case Op_StoreB:
case Op_StoreC:
case Op_StoreI:
@@ -3435,6 +3381,12 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
case Op_LoadN:
case Op_LoadRange:
case Op_LoadS:
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_LoadVectorGatherMasked:
case Op_StoreVectorScatterMasked:
case Op_LoadVectorMasked:
case Op_StoreVectorMasked:
break;
case Op_AddP: { // Assert sane base pointers
@@ -3785,35 +3737,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
#endif
break;
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_LoadVectorGatherMasked:
case Op_StoreVectorScatterMasked:
case Op_VectorCmpMasked:
case Op_VectorMaskGen:
case Op_LoadVectorMasked:
case Op_StoreVectorMasked:
break;
case Op_AddReductionVI:
case Op_AddReductionVL:
case Op_AddReductionVHF:
case Op_AddReductionVF:
case Op_AddReductionVD:
case Op_MulReductionVI:
case Op_MulReductionVL:
case Op_MulReductionVHF:
case Op_MulReductionVF:
case Op_MulReductionVD:
case Op_MinReductionV:
case Op_MaxReductionV:
case Op_UMinReductionV:
case Op_UMaxReductionV:
case Op_AndReductionV:
case Op_OrReductionV:
case Op_XorReductionV:
break;
case Op_PackB:
case Op_PackS:
case Op_PackI:
@@ -3889,8 +3812,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
case Op_Blackhole:
break;
case Op_RangeCheck: {
RangeCheckNode* rc = n->as_RangeCheck();
Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
@@ -4059,20 +3980,13 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R
// Intel update-in-place two-address operations and better register usage
// on RISCs. Must come after regular optimizations to avoid GVN Ideal
// calls canonicalizing them back.
// (3) Count the number of double-precision FP ops, single-precision FP ops
// and call sites. On Intel, we can get correct rounding either by
// forcing singles to memory (requires extra stores and loads after each
// FP bytecode) or we can set a rounding mode bit (requires setting and
// clearing the mode bit around call sites). The mode bit is only used
// if the relative frequency of single FP ops to calls is low enough.
// This is a key transform for SPEC mpeg_audio.
// (4) Detect infinite loops; blobs of code reachable from above but not
// (3) Detect infinite loops; blobs of code reachable from above but not
// below. Several of the Code_Gen algorithms fail on such code shapes,
// so we simply bail out. Happens a lot in ZKM.jar, but also happens
// from time to time in other codes (such as -Xcomp finalizer loops, etc).
// Detection is by looking for IfNodes where only 1 projection is
// reachable from below or CatchNodes missing some targets.
// (5) Assert for insane oop offsets in debug mode.
// (4) Assert for insane oop offsets in debug mode.
bool Compile::final_graph_reshaping() {
// an infinite loop may have been eliminated by the optimizer,