8345156: C2: Add bailouts next to a few asserts

Reviewed-by: kvn, epeter
This commit is contained in:
Daniel Skantz 2024-12-09 16:29:56 +00:00 committed by Vladimir Kozlov
parent b120404620
commit 480b508cf2
8 changed files with 64 additions and 13 deletions

View File

@ -1356,6 +1356,9 @@ void PhaseCFG::verify() const {
verify_memory_writer_placement(block, n);
if (n->needs_anti_dependence_check()) {
verify_anti_dependences(block, n);
if (C->failing()) {
return;
}
}
for (uint k = 0; k < n->req(); k++) {
Node *def = n->in(k);

View File

@ -2563,6 +2563,9 @@ void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
void PhaseChaitin::verify(ResourceArea* a, bool verify_ifg) const {
if (VerifyRegisterAllocator) {
_cfg.verify();
if (C->failing()) {
return;
}
verify_base_ptrs(a);
if (verify_ifg) {
_ifg->verify(this);

View File

@ -2969,6 +2969,9 @@ void Compile::Code_Gen() {
print_method(PHASE_GLOBAL_CODE_MOTION, 2);
NOT_PRODUCT( verify_graph_edges(); )
cfg.verify();
if (failing()) {
return;
}
}
PhaseChaitin regalloc(unique(), cfg, matcher, false);
@ -5041,7 +5044,6 @@ bool Compile::fail_randomly() {
}
bool Compile::failure_is_artificial() {
assert(failing_internal(), "should be failing");
return C->failure_reason_is("StressBailout");
}
#endif

View File

@ -243,7 +243,6 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
}
}
#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
@ -255,6 +254,7 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
tmp = tmp->_idom;
}
if (tmp != b1) {
#ifdef ASSERT
// Detected an unschedulable graph. Print some nice stuff and die.
tty->print_cr("!!! Unschedulable graph !!!");
for (uint j=0; j<n->len(); j++) { // For all inputs
@ -267,10 +267,11 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
}
tty->print("Failing node: ");
n->dump();
assert(false, "unscheduable graph");
assert(false, "unschedulable graph");
#endif
cfg->C->record_failure("unschedulable graph");
}
}
#endif
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
// Find the last input dominated by all other inputs.
@ -285,7 +286,10 @@ static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
// The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block.
DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
assert_dom(deepb, inb, n, cfg);
if (cfg->C->failing()) {
return nullptr;
}
deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth;
}
@ -372,6 +376,9 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
if (!parent_node->pinned()) {
// Set earliest legal block.
Block* earliest_block = find_deepest_input(parent_node, this);
if (C->failing()) {
return false;
}
map_node_to_block(parent_node, earliest_block);
} else {
assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
@ -523,7 +530,10 @@ static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg)
// The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block.
DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
assert_dom(deepb, inb, load, cfg);
if (cfg->C->failing()) {
return nullptr;
}
deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth;
}
@ -715,6 +725,9 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// dominator tree, and allow for a broader discovery of anti-dependences.
if (C->subsume_loads()) {
early = memory_early_block(load, early, this);
if (C->failing()) {
return nullptr;
}
}
ResourceArea* area = Thread::current()->resource_area();
@ -1519,6 +1532,9 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
// Hoist LCA above possible-defs and insert anti-dependences to
// defs in new LCA block.
LCA = insert_anti_dependences(LCA, self);
if (C->failing()) {
return;
}
}
if (early->_dom_depth > LCA->_dom_depth) {
@ -1611,8 +1627,8 @@ void PhaseCFG::global_code_motion() {
Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
if (!schedule_early(visited, stack)) {
// Bailout without retry
assert(false, "early schedule failed");
C->record_method_not_compilable("early schedule failed");
assert(C->failure_is_artificial(), "early schedule failed");
C->record_method_not_compilable("early schedule failed" DEBUG_ONLY(COMMA true));
return;
}
@ -1657,6 +1673,9 @@ void PhaseCFG::global_code_motion() {
// uncommon trap. Combined with the too_many_traps guards
// above, this prevents SEGV storms reported in 6366351,
// by recompiling offending methods without this optimization.
if (C->failing()) {
return;
}
}
}
@ -1726,6 +1745,9 @@ void PhaseCFG::global_code_motion() {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
call_catch_cleanup(block);
if (C->failing()) {
return;
}
}
#ifndef PRODUCT

View File

@ -493,6 +493,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
n->in(LoadNode::Memory) == best->in(StoreNode::Memory)) {
// Found anti-dependent load
insert_anti_dependences(block, n);
if (C->failing()) {
return;
}
}
}
}
@ -1362,6 +1365,9 @@ void PhaseCFG::call_catch_cleanup(Block* block) {
map_node_to_block(clone, sb);
if (clone->needs_anti_dependence_check()) {
insert_anti_dependences(sb, clone);
if (C->failing()) {
return;
}
}
}
}

View File

@ -4899,7 +4899,10 @@ void PhaseIdealLoop::build_and_optimize() {
// that require basic-block info (like cloning through Phi's)
if (!C->major_progress() && SplitIfBlocks && do_split_ifs) {
visited.clear();
split_if_with_blocks( visited, nstack);
split_if_with_blocks(visited, nstack);
if (C->failing()) {
return;
}
DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
}
@ -6423,6 +6426,7 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
#ifdef ASSERT
if (_verify_only && !n->is_CFG()) {
// Check def-use domination.
// We would like to expose this check in product but it appears to be expensive.
compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
}
#endif

View File

@ -1548,6 +1548,9 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
}
try_sink_out_of_loop(n);
if (C->failing()) {
return;
}
try_move_store_after_loop(n);
}
@ -1735,7 +1738,11 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
assert(!n->is_Store() && !n->is_LoadStore(), "no node with a side effect");
if (n->is_Store() || n->is_LoadStore()) {
assert(false, "no node with a side effect");
C->record_failure("no node with a side effect");
return;
}
Node* outer_loop_clone = nullptr;
for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
Node* u = n->last_out(j); // Clone private computation per use
@ -1983,6 +1990,9 @@ void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack
if (cnt != 0 && !n->is_Con()) {
assert(has_node(n), "no dead nodes");
split_if_with_blocks_post(n);
if (C->failing()) {
return;
}
}
if (must_throttle_split_if()) {
nstack.clear();

View File

@ -1753,18 +1753,19 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
// Call DFA to match this node, and return
svec->DFA( n->Opcode(), n );
#ifdef ASSERT
uint x;
for( x = 0; x < _LAST_MACH_OPER; x++ )
if( svec->valid(x) )
break;
if (x >= _LAST_MACH_OPER) {
#ifdef ASSERT
n->dump();
svec->dump();
assert( false, "bad AD file" );
}
#endif
assert( false, "bad AD file" );
C->record_failure("bad AD file");
}
return control;
}