8327963: C2: fix construction of memory graph around Initialize node to prevent incorrect execution if allocation is removed

Co-authored-by: Emanuel Peter <epeter@openjdk.org>
Co-authored-by: Roberto Castañeda Lozano <rcastanedalo@openjdk.org>
Reviewed-by: epeter, rcastanedalo
Roland Westrelin 2025-11-04 11:17:56 +00:00
parent a840dc22c5
commit e6546683a8
24 changed files with 897 additions and 91 deletions
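For context, the failure mode fixed here, condensed from the regression tests this commit adds (see test2 in TestEliminationOfAllocationWithoutUse below): stores to an array are followed in a loop by an allocation that has no use, and removing that allocation used to discard the stores. A minimal sketch, with comments summarizing the mechanism described in the test's @summary:

    // Condensed from test2 in the new regression test below.
    // Before this fix, the single memory projection of the unused allocation's
    // Initialize node tied the double[] slice to the raw memory slice. When the
    // allocation (and its Initialize MemBar) was removed, the raw slice looked
    // through the MergeMem to the initial memory state, dragging the double[]
    // slice back with it and losing the arr1 stores.
    public static void test2(double[] arr1) {
        for (int i = 0; i < arr1.length; ++i) {
            arr1[i] = 1;                    // store on the double[] slice
            double[] tmp = new double[100]; // unused allocation, later eliminated
        }
    }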

View File

@@ -274,6 +274,7 @@ macro(NegL)
macro(NegD)
macro(NegF)
macro(NeverBranch)
macro(NarrowMemProj)
macro(OnSpinWait)
macro(Opaque1)
macro(OpaqueLoopInit)

View File

@@ -863,7 +863,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr
// \|/
// Phi # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*> &alloc_worklist) {
Node* ophi = curr_castpp->in(1);
assert(ophi->is_Phi(), "Expected this to be a Phi node.");
@@ -1279,7 +1279,7 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No
return true;
}
void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
bool delay = _igvn->delay_transform();
_igvn->set_delay_transform(true);
_igvn->hash_delete(ophi);
@@ -1306,7 +1306,7 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_wo
// splitting CastPPs we make reference to the inputs of the Cmp that is used
// by the If controlling the CastPP.
for (uint i = 0; i < castpps.size(); i++) {
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
}
for (uint i = 0; i < others.size(); i++) {
@@ -4152,6 +4152,11 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
// which contains this memory slice, otherwise skip over it.
if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
result = proj_in->in(TypeFunc::Memory);
} else if (C->get_alias_index(result->adr_type()) != alias_idx) {
assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
result = get_map(result->_idx);
assert(result != nullptr, "new projection should have been allocated");
break;
}
} else if (proj_in->is_MemBar()) {
// Check if there is an array copy for a clone
@@ -4448,6 +4453,22 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
_compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
// Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
InitializeNode* init = alloc->as_Allocate()->initialization();
assert(init != nullptr, "can't find Initialization node for this Allocate node");
auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
const TypePtr* adr_type = proj->adr_type();
const TypePtr* new_adr_type = tinst->add_offset(adr_type->offset());
if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
igvn->set_type(new_proj, new_proj->bottom_type());
record_for_optimizer(new_proj);
set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
}
};
init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);
// First, put on the worklist all Field edges from Connection Graph
// which is more accurate than putting immediate users from Ideal Graph.
@@ -4519,7 +4540,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// finishes. For now we just try to split out the SR inputs of the merge.
Node* parent = n->in(1);
if (reducible_merges.member(n)) {
reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
reduce_phi(n->as_Phi(), alloc_worklist);
#ifdef ASSERT
if (VerifyReduceAllocationMerges) {
reduced_merges.push(n);
@@ -4711,11 +4732,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
}
if (n->is_Phi() || n->is_ClearArray()) {
// we don't need to do anything, but the users must be pushed
} else if (n->is_MemBar()) { // Initialize, MemBar nodes
// we don't need to do anything, but the users must be pushed
n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
if (n == nullptr) {
continue;
} else if (n->is_MemBar()) { // MemBar nodes
if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses)
// we don't need to do anything, but the users must be pushed
n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
if (n == nullptr) {
continue;
}
}
} else if (n->is_CallLeaf()) {
// Runtime calls with narrow memory input (no MergeMem node)
@@ -4732,6 +4755,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// get the memory projection
n = n->find_out_with(Op_SCMemProj);
assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
} else if (n->is_Proj()) {
assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
} else {
#ifdef ASSERT
if (!n->is_Mem()) {
@@ -4775,6 +4800,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
} else if (use->is_Proj()) {
assert(n->is_Initialize(), "We only push projections of Initialize");
if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
#ifdef ASSERT
} else if(use->is_Mem()) {
assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
@@ -4826,7 +4856,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// First, update mergemem by moving memory nodes to corresponding slices
// if their type became more precise since this mergemem was created.
while (mem->is_Mem()) {
const Type *at = igvn->type(mem->in(MemNode::Address));
const Type* at = igvn->type(mem->in(MemNode::Address));
if (at != Type::TOP) {
assert (at->isa_ptr() != nullptr, "pointer type required.");
uint idx = (uint)_compile->get_alias_index(at->is_ptr());
@@ -4946,7 +4976,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
record_for_optimizer(n);
} else {
assert(n->is_Allocate() || n->is_CheckCastPP() ||
n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()");
}
}
#if 0 // ifdef ASSERT

View File

@@ -563,8 +563,10 @@ private:
// Memory Phi - most recent unique Phi split out
// from this Phi
// MemNode - new memory input for this node
// ChecCastPP - allocation that this is a cast of
// CheckCastPP - allocation that this is a cast of
// allocation - CheckCastPP of the allocation
// NarrowMem - newly created projection (type includes instance_id) from projection created
// before EA
// manage entries in _node_map
@@ -609,11 +611,11 @@ private:
bool can_reduce_phi_check_inputs(PhiNode* ophi) const;
void reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node*> &alloc_worklist);
void reduce_phi_on_cmp(Node* cmp);
bool reduce_phi_on_safepoints(PhiNode* ophi);
bool reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints);
void reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist);
void set_not_scalar_replaceable(PointsToNode* ptn NOT_PRODUCT(COMMA const char* reason)) const {
#ifndef PRODUCT

View File

@@ -3641,14 +3641,17 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
record_for_igvn(minit_in); // fold it up later, if possible
Node* minit_out = memory(rawidx);
assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
// Add an edge in the MergeMem for the header fields so an access
// to one of those has correct memory state
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
int mark_idx = C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()));
// Add an edge in the MergeMem for the header fields so an access to one of those has correct memory state.
// Use one NarrowMemProjNode per slice to properly record the adr type of each slice. The Initialize node will have
// multiple projections as a result.
set_memory(_gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(mark_idx))), mark_idx);
int klass_idx = C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()));
set_memory(_gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(klass_idx))), klass_idx);
if (oop_type->isa_aryptr()) {
const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
int elemidx = C->get_alias_index(telemref);
hook_memory_on_init(*this, elemidx, minit_in, minit_out);
hook_memory_on_init(*this, elemidx, minit_in, _gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(elemidx))));
} else if (oop_type->isa_instptr()) {
ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
@@ -3657,7 +3660,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
continue; // do not bother to track really large numbers of fields
// Find (or create) the alias category for this field:
int fieldidx = C->alias_type(field)->index();
hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
hook_memory_on_init(*this, fieldidx, minit_in, _gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(fieldidx))));
}
}
}

View File

@@ -598,7 +598,7 @@ void IdealGraphPrinter::visit_node(Node* n, bool edges) {
t->dump_on(&s2);
} else if( t == Type::MEMORY ) {
s2.print(" Memory:");
MemNode::dump_adr_type(node, node->adr_type(), &s2);
MemNode::dump_adr_type(node->adr_type(), &s2);
}
assert(s2.size() < sizeof(buffer), "size in range");

View File

@@ -5544,7 +5544,7 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No
InitializeNode* init = alloc->initialization();
Node* alloc_mem = alloc->in(TypeFunc::Memory);
C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
init->replace_mem_projs_by(alloc_mem, C);
// The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
// the allocation (i.e. is only valid if the allocation succeeds):
@@ -5595,8 +5595,20 @@
}
const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
int elemidx = C->get_alias_index(telemref);
set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
// Need to properly move every memory projection for the Initialize
#ifdef ASSERT
int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
#endif
auto move_proj = [&](ProjNode* proj) {
int alias_idx = C->get_alias_index(proj->adr_type());
assert(alias_idx == Compile::AliasIdxRaw ||
alias_idx == elemidx ||
alias_idx == mark_idx ||
alias_idx == klass_idx, "should be raw memory or array element type");
set_memory(proj, alias_idx);
};
init->for_each_proj(move_proj, TypeFunc::Memory);
Node* allocx = _gvn.transform(alloc);
assert(allocx == alloc, "where has the allocation gone?");

View File

@@ -4042,7 +4042,9 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
call->init_req(TypeFunc::I_O, C->top()); // Does no I/O.
call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl));
call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out_or_null(TypeFunc::ReturnAdr));
call->init_req(TypeFunc::FramePtr, C->start()->proj_out_or_null(TypeFunc::FramePtr));
Node* frame = new ParmNode(C->start(), TypeFunc::FramePtr);
_igvn.register_new_node_with_optimizer(frame);
call->init_req(TypeFunc::FramePtr, frame);
_igvn.register_new_node_with_optimizer(call);
result_ctrl = new ProjNode(call,TypeFunc::Control);
_igvn.register_new_node_with_optimizer(result_ctrl);

View File

@@ -2693,8 +2693,9 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l
if (head->is_strip_mined() && mode != IgnoreStripMined) {
CountedLoopNode* cl = head->as_CountedLoop();
CountedLoopEndNode* cle = cl->loopexit();
Node* cle_out = cle->proj_out_or_null(false);
if (use == cle_out) {
// is use the projection that exits the loop from the CountedLoopEndNode?
if (use->in(0) == cle) {
IfFalseNode* cle_out = use->as_IfFalse();
IfNode* le = cl->outer_loop_end();
use = le->proj_out(false);
use_loop = get_loop(use);

View File

@@ -1042,7 +1042,6 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
if (use->is_Initialize()) {
// Eliminate Initialize node.
InitializeNode *init = use->as_Initialize();
assert(init->outcnt() <= 2, "only a control and memory projection expected");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != nullptr) {
_igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
@@ -1052,18 +1051,18 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
#endif
}
Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (mem_proj != nullptr) {
Node *mem = init->in(TypeFunc::Memory);
Node* mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
if (init->number_of_projs(TypeFunc::Memory) > 0) {
if (mem->is_MergeMem()) {
assert(mem->in(TypeFunc::Memory) == _callprojs.fallthrough_memproj, "allocation memory projection");
assert(mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == _callprojs.fallthrough_memproj, "allocation memory projection");
} else {
assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
}
#endif
_igvn.replace_node(mem_proj, mem);
}
#endif
init->replace_mem_projs_by(mem, &_igvn);
assert(init->outcnt() == 0, "should only have had a control and some memory projections, and we removed them");
} else {
assert(false, "only Initialize or AddP expected");
}
@@ -1650,7 +1649,16 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object
// initialization.
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
// What we want is to prevent the compiler and the CPU from re-ordering the stores that initialize this object
// with subsequent stores to any slice. As a consequence, this MemBar should capture the entire memory state at
// this point in the IR and produce a new memory state that should cover all slices. However, the Initialize node
// only captures/produces a partial memory state making it complicated to insert such a MemBar. Because
// re-ordering by the compiler can't happen by construction (a later Store that publishes the just allocated
// object reference is indirectly control dependent on the Initialize node), preventing reordering by the CPU is
// sufficient. For that a MemBar on the raw memory slice is good enough.
// If init is null, this allocation does have an InitializeNode but this logic can't locate it (see comment in
// PhaseMacroExpand::initialize_object()).
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxRaw);
transform_later(mb);
mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
@@ -1666,24 +1674,33 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN
// barrier.
Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
Node* init_mem = init->proj_out_or_null(TypeFunc::Memory);
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
// See comment above that explains why a raw memory MemBar is good enough.
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxRaw);
transform_later(mb);
Node* ctrl = new ProjNode(init, TypeFunc::Control);
transform_later(ctrl);
Node* mem = new ProjNode(init, TypeFunc::Memory);
transform_later(mem);
Node* old_raw_mem_proj = nullptr;
auto find_raw_mem = [&](ProjNode* proj) {
if (C->get_alias_index(proj->adr_type()) == Compile::AliasIdxRaw) {
assert(old_raw_mem_proj == nullptr, "only one expected");
old_raw_mem_proj = proj;
}
};
init->for_each_proj(find_raw_mem, TypeFunc::Memory);
assert(old_raw_mem_proj != nullptr, "should have found raw mem Proj");
Node* raw_mem_proj = new ProjNode(init, TypeFunc::Memory);
transform_later(raw_mem_proj);
// The MemBarStoreStore depends on control and memory coming
// from the InitializeNode
mb->init_req(TypeFunc::Memory, mem);
mb->init_req(TypeFunc::Memory, raw_mem_proj);
mb->init_req(TypeFunc::Control, ctrl);
ctrl = new ProjNode(mb, TypeFunc::Control);
transform_later(ctrl);
mem = new ProjNode(mb, TypeFunc::Memory);
Node* mem = new ProjNode(mb, TypeFunc::Memory);
transform_later(mem);
// All nodes that depended on the InitializeNode for control
@@ -1692,9 +1709,7 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN
if (init_ctrl != nullptr) {
_igvn.replace_node(init_ctrl, ctrl);
}
if (init_mem != nullptr) {
_igvn.replace_node(init_mem, mem);
}
_igvn.replace_node(old_raw_mem_proj, mem);
}
}
}

View File

@@ -151,6 +151,8 @@ void Matcher::verify_new_nodes_only(Node* xroot) {
continue;
}
assert(C->node_arena()->contains(n), "dead node");
assert(!n->is_Initialize() || n->as_Initialize()->number_of_projs(TypeFunc::Memory) == 1,
"after matching, Initialize should have a single memory projection");
for (uint j = 0; j < n->req(); j++) {
Node* in = n->in(j);
if (in != nullptr) {
@@ -1029,7 +1031,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// Old-space or new-space check
if (!C->node_arena()->contains(n)) {
// Old space!
Node* m;
Node* m = nullptr;
if (has_new_node(n)) { // Not yet Label/Reduced
m = new_node(n);
} else {
@@ -1044,9 +1046,21 @@
}
} else { // Nothing the matcher cares about
if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) { // Projections?
// Convert to machine-dependent projection
m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
NOT_PRODUCT(record_new2old(m, n);)
if (n->in(0)->is_Initialize() && n->as_Proj()->_con == TypeFunc::Memory) {
// Initialize may have multiple NarrowMem projections. They would all match to identical raw mem MachProjs.
// We don't need multiple MachProjs. Create one if none already exist, otherwise use existing one.
m = n->in(0)->as_Initialize()->mem_mach_proj();
if (m == nullptr && has_new_node(n->in(0))) {
InitializeNode* new_init = new_node(n->in(0))->as_Initialize();
m = new_init->mem_mach_proj();
}
assert(m == nullptr || m->is_MachProj(), "no mem projection yet or a MachProj created during matching");
}
if (m == nullptr) {
// Convert to machine-dependent projection
m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
NOT_PRODUCT(record_new2old(m, n);)
}
if (m->in(0) != nullptr) // m might be top
collect_null_checks(m, n);
} else { // Else just a regular 'ol guy

View File

@@ -91,7 +91,7 @@ void MemNode::dump_spec(outputStream *st) const {
if (in(Address) != nullptr)
_adr_type = in(Address)->bottom_type()->isa_ptr();
#endif
dump_adr_type(this, _adr_type, st);
dump_adr_type(_adr_type, st);
Compile* C = Compile::current();
if (C->alias_type(_adr_type)->is_volatile()) {
@@ -108,7 +108,7 @@
}
}
void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
void MemNode::dump_adr_type(const TypePtr* adr_type, outputStream* st) {
st->print(" @");
if (adr_type == nullptr) {
st->print("null");
@@ -4229,7 +4229,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
}
void MemBarNode::remove(PhaseIterGVN *igvn) {
assert(outcnt() > 0 && outcnt() <= 2, "Only one or two out edges allowed");
assert(outcnt() > 0 && (outcnt() <= 2 || Opcode() == Op_Initialize), "Only one or two out edges allowed");
if (trailing_store() || trailing_load_store()) {
MemBarNode* leading = leading_membar();
if (leading != nullptr) {
@@ -4237,12 +4237,16 @@ void MemBarNode::remove(PhaseIterGVN *igvn) {
leading->remove(igvn);
}
}
if (proj_out_or_null(TypeFunc::Memory) != nullptr) {
igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
}
if (proj_out_or_null(TypeFunc::Control) != nullptr) {
igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
}
if (is_Initialize()) {
as_Initialize()->replace_mem_projs_by(in(TypeFunc::Memory), igvn);
} else {
if (proj_out_or_null(TypeFunc::Memory) != nullptr) {
igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
}
}
}
//------------------------------Ideal------------------------------------------
@@ -5445,6 +5449,48 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
return rawmem;
}
void InitializeNode::replace_mem_projs_by(Node* mem, Compile* C) {
auto replace_proj = [&](ProjNode* proj) {
C->gvn_replace_by(proj, mem);
return CONTINUE;
};
apply_to_projs(replace_proj, TypeFunc::Memory);
}
void InitializeNode::replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn) {
DUIterator_Fast imax, i = fast_outs(imax);
auto replace_proj = [&](ProjNode* proj) {
igvn->replace_node(proj, mem);
--i; --imax;
return CONTINUE;
};
apply_to_projs(imax, i, replace_proj, TypeFunc::Memory);
}
bool InitializeNode::already_has_narrow_mem_proj_with_adr_type(const TypePtr* adr_type) const {
auto find_proj = [&](ProjNode* proj) {
if (proj->adr_type() == adr_type) {
return BREAK_AND_RETURN_CURRENT_PROJ;
}
return CONTINUE;
};
DUIterator_Fast imax, i = fast_outs(imax);
return apply_to_narrow_mem_projs_any_iterator(UsesIteratorFast(imax, i, this), find_proj) != nullptr;
}
MachProjNode* InitializeNode::mem_mach_proj() const {
auto find_proj = [](ProjNode* proj) {
if (proj->is_MachProj()) {
return BREAK_AND_RETURN_CURRENT_PROJ;
}
return CONTINUE;
};
ProjNode* proj = apply_to_projs(find_proj, TypeFunc::Memory);
if (proj == nullptr) {
return nullptr;
}
return proj->as_MachProj();
}
#ifdef ASSERT
bool InitializeNode::stores_are_sane(PhaseValues* phase) {
@@ -5867,6 +5913,7 @@ Node* MergeMemNode::memory_at(uint alias_idx) const {
|| n->adr_type() == nullptr // address is TOP
|| n->adr_type() == TypePtr::BOTTOM
|| n->adr_type() == TypeRawPtr::BOTTOM
|| n->is_NarrowMemProj()
|| !Compile::current()->do_aliasing(),
"must be a wide memory");
// do_aliasing == false if we are organizing the memory states manually.

View File

@@ -167,7 +167,7 @@
bool is_unsafe_access() const { return _unsafe_access; }
#ifndef PRODUCT
static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
static void dump_adr_type(const TypePtr* adr_type, outputStream* st);
virtual void dump_spec(outputStream *st) const;
#endif
};
@@ -1371,7 +1371,20 @@
intptr_t header_size, Node* size_in_bytes,
PhaseIterGVN* phase);
private:
// An Initialize node has multiple memory projections. Helper methods used when the node is removed.
// For use at parse time
void replace_mem_projs_by(Node* mem, Compile* C);
// For use with IGVN
void replace_mem_projs_by(Node* mem, PhaseIterGVN* igvn);
// Does a NarrowMemProj with this adr_type and this node as input already exist?
bool already_has_narrow_mem_proj_with_adr_type(const TypePtr* adr_type) const;
// Used during matching: find the MachProj memory projection if there's one. Expectation is that there should be at
// most one.
MachProjNode* mem_mach_proj() const;
private:
void remove_extra_zeroes();
// Find out where a captured store should be placed (or already is placed).
@@ -1388,6 +1401,33 @@
PhaseGVN* phase);
intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
// Iterate with i over all NarrowMemProj uses calling callback
template <class Callback, class Iterator> NarrowMemProjNode* apply_to_narrow_mem_projs_any_iterator(Iterator i, Callback callback) const {
auto filter = [&](ProjNode* proj) {
if (proj->is_NarrowMemProj() && callback(proj->as_NarrowMemProj()) == BREAK_AND_RETURN_CURRENT_PROJ) {
return BREAK_AND_RETURN_CURRENT_PROJ;
}
return CONTINUE;
};
ProjNode* res = apply_to_projs_any_iterator(i, filter);
if (res == nullptr) {
return nullptr;
}
return res->as_NarrowMemProj();
}
public:
// callback is allowed to add new uses that will then be iterated over
template <class Callback> void for_each_narrow_mem_proj_with_new_uses(Callback callback) const {
auto callback_always_continue = [&](NarrowMemProjNode* proj) {
callback(proj);
return MultiNode::CONTINUE;
};
DUIterator i = outs();
apply_to_narrow_mem_projs_any_iterator(UsesIterator(i, this), callback_always_continue);
}
};
//------------------------------MergeMem---------------------------------------

View File

@@ -45,30 +45,58 @@ Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->
// Get a named projection or null if not found
ProjNode* MultiNode::proj_out_or_null(uint which_proj) const {
assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
Node *p = fast_out(i);
if (p->is_Proj()) {
ProjNode *proj = p->as_Proj();
if (proj->_con == which_proj) {
assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || proj->Opcode() == (which_proj ? Op_IfTrue : Op_IfFalse), "bad if #2");
return proj;
}
} else {
assert(p == this && this->is_Start(), "else must be proj");
continue;
}
}
return nullptr;
assert(number_of_projs(which_proj) <= 1, "only when there's a single projection");
ProjNode* proj = find_first(which_proj);
assert(proj == nullptr || (Opcode() != Op_If && Opcode() != Op_RangeCheck) || proj->Opcode() == (which_proj ? Op_IfTrue : Op_IfFalse),
"incorrect projection node at If/RangeCheck: IfTrue on false path or IfFalse on true path");
return proj;
}
ProjNode* MultiNode::proj_out_or_null(uint which_proj, bool is_io_use) const {
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
ProjNode* proj = fast_out(i)->isa_Proj();
if (proj != nullptr && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) {
return proj;
assert(number_of_projs(which_proj, is_io_use) <= 1, "only when there's a single projection");
return find_first(which_proj, is_io_use);
}
template<class Callback> ProjNode* MultiNode::apply_to_projs(Callback callback, uint which_proj, bool is_io_use) const {
auto filter = [&](ProjNode* proj) {
if (proj->_is_io_use == is_io_use && callback(proj) == BREAK_AND_RETURN_CURRENT_PROJ) {
return BREAK_AND_RETURN_CURRENT_PROJ;
}
}
return nullptr;
return CONTINUE;
};
return apply_to_projs(filter, which_proj);
}
uint MultiNode::number_of_projs(uint which_proj) const {
uint cnt = 0;
auto count_projs = [&](ProjNode* proj) {
cnt++;
};
for_each_proj(count_projs, which_proj);
return cnt;
}
uint MultiNode::number_of_projs(uint which_proj, bool is_io_use) const {
uint cnt = 0;
auto count_projs = [&](ProjNode* proj) {
cnt++;
};
for_each_proj(count_projs, which_proj, is_io_use);
return cnt;
}
ProjNode* MultiNode::find_first(uint which_proj) const {
auto find_proj = [&](ProjNode* proj) {
return BREAK_AND_RETURN_CURRENT_PROJ;
};
return apply_to_projs(find_proj, which_proj);
}
ProjNode* MultiNode::find_first(uint which_proj, bool is_io_use) const {
auto find_proj = [](ProjNode* proj) {
return BREAK_AND_RETURN_CURRENT_PROJ;
};
return apply_to_projs(find_proj, which_proj, is_io_use);
}
// Get a named projection
@@ -239,3 +267,8 @@ ProjNode* ProjNode::other_if_proj() const {
assert(_con == 0 || _con == 1, "not an if?");
return in(0)->as_If()->proj_out(1-_con);
}
NarrowMemProjNode::NarrowMemProjNode(InitializeNode* src, const TypePtr* adr_type)
: ProjNode(src, TypeFunc::Memory), _adr_type(adr_type) {
init_class_id(Class_NarrowMemProj);
}

View File

@@ -49,6 +49,105 @@
ProjNode* proj_out(uint which_proj) const; // Get a named projection
ProjNode* proj_out_or_null(uint which_proj) const;
ProjNode* proj_out_or_null(uint which_proj, bool is_io_use) const;
uint number_of_projs(uint which_proj) const;
uint number_of_projs(uint which_proj, bool is_io_use) const;
protected:
// Provide single interface for DUIterator_Fast/DUIterator for template method below
class UsesIteratorFast {
DUIterator_Fast& _imax;
DUIterator_Fast& _i;
const Node* _node;
public:
bool cont() {
return _i < _imax;
}
void next() {
_i++;
}
Node* current() {
return _node->fast_out(_i);
}
UsesIteratorFast(DUIterator_Fast& imax, DUIterator_Fast& i, const Node* node)
: _imax(imax), _i(i), _node(node) {
}
};
class UsesIterator {
DUIterator& _i;
const Node* _node;
public:
bool cont() {
return _node->has_out(_i);
}
void next() {
_i++;
}
Node* current() {
return _node->out(_i);
}
UsesIterator(DUIterator& i, const Node* node)
: _i(i), _node(node) {
}
};
// Iterate with i over all Proj uses calling callback
template<class Callback, class Iterator> ProjNode* apply_to_projs_any_iterator(Iterator i, Callback callback) const {
for (; i.cont(); i.next()) {
Node* p = i.current();
if (p->is_Proj()) {
ProjNode* proj = p->as_Proj();
ApplyToProjs result = callback(proj);
if (result == BREAK_AND_RETURN_CURRENT_PROJ) {
return proj;
}
assert(result == CONTINUE, "should be either break or continue");
} else {
assert(p == this && is_Start(), "else must be proj");
}
}
return nullptr;
}
enum ApplyToProjs {
CONTINUE,
BREAK_AND_RETURN_CURRENT_PROJ
};
// Run callback on projections with iterator passed as argument
template <class Callback> ProjNode* apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const;
// Same but with default iterator and for matching _con
template<class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj) const {
DUIterator_Fast imax, i = fast_outs(imax);
return apply_to_projs(imax, i, callback, which_proj);
}
// Same but for matching _con and _is_io_use
template <class Callback> ProjNode* apply_to_projs(Callback callback, uint which_proj, bool is_io_use) const;
public:
template<class Callback> void for_each_proj(Callback callback, uint which_proj) const {
auto callback_always_continue = [&](ProjNode* proj) {
callback(proj);
return MultiNode::CONTINUE;
};
apply_to_projs(callback_always_continue, which_proj);
}
template <class Callback> void for_each_proj(Callback callback, uint which_proj, bool is_io_use) const {
auto callback_always_continue = [&](ProjNode* proj) {
callback(proj);
return MultiNode::CONTINUE;
};
apply_to_projs(callback_always_continue, which_proj, is_io_use);
}
ProjNode* find_first(uint which_proj) const;
ProjNode* find_first(uint which_proj, bool is_io_use) const;
};
//------------------------------ProjNode---------------------------------------
@@ -106,6 +205,41 @@
ProjNode* other_if_proj() const;
};
// A ProjNode variant that captures an adr_type(). Used as a projection of InitializeNode to have the right adr_type()
// for array elements/fields.
class NarrowMemProjNode : public ProjNode {
private:
const TypePtr* const _adr_type;
protected:
virtual uint hash() const {
return ProjNode::hash() + _adr_type->hash();
}
virtual bool cmp(const Node& n) const {
return ProjNode::cmp(n) && ((NarrowMemProjNode&)n)._adr_type == _adr_type;
}
virtual uint size_of() const {
return sizeof(*this);
}
public:
NarrowMemProjNode(InitializeNode* src, const TypePtr* adr_type);
virtual const TypePtr* adr_type() const {
return _adr_type;
}
virtual int Opcode() const;
};
template <class Callback> ProjNode* MultiNode::apply_to_projs(DUIterator_Fast& imax, DUIterator_Fast& i, Callback callback, uint which_proj) const {
auto filter = [&](ProjNode* proj) {
if (proj->_con == which_proj && callback(proj) == BREAK_AND_RETURN_CURRENT_PROJ) {
return BREAK_AND_RETURN_CURRENT_PROJ;
}
return CONTINUE;
};
return apply_to_projs_any_iterator(UsesIteratorFast(imax, i, this), filter);
}
/* Tuples are used to avoid manual graph surgery. When a node with Proj outputs (such as a call)
* must be removed and its ouputs replaced by its input, or some other value, we can make its
* ::Ideal return a tuple of what we want for each output: the ::Identity of output Proj will

View File

@@ -2602,7 +2602,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
t->dump_on(st);
} else if (t == Type::MEMORY) {
st->print(" Memory:");
MemNode::dump_adr_type(this, adr_type(), st);
MemNode::dump_adr_type(adr_type(), st);
} else if (Verbose || WizardMode) {
st->print(" Type:");
if (t) {

View File

@@ -134,6 +134,7 @@ class MoveNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NarrowMemProjNode;
class NegNode;
class NegVNode;
class NeverBranchNode;
@@ -771,6 +772,7 @@ public:
DEFINE_CLASS_ID(IfFalse, IfProj, 1)
DEFINE_CLASS_ID(Parm, Proj, 4)
DEFINE_CLASS_ID(MachProj, Proj, 5)
DEFINE_CLASS_ID(NarrowMemProj, Proj, 6)
DEFINE_CLASS_ID(Mem, Node, 4)
DEFINE_CLASS_ID(Load, Mem, 0)
@@ -989,6 +991,7 @@ public:
DEFINE_CLASS_QUERY(Multi)
DEFINE_CLASS_QUERY(MultiBranch)
DEFINE_CLASS_QUERY(MulVL)
DEFINE_CLASS_QUERY(NarrowMemProj)
DEFINE_CLASS_QUERY(Neg)
DEFINE_CLASS_QUERY(NegV)
DEFINE_CLASS_QUERY(NeverBranch)

View File

@@ -2592,12 +2592,14 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
}
}
}
auto enqueue_init_mem_projs = [&](ProjNode* proj) {
add_users_to_worklist0(proj, worklist);
};
// If changed initialization activity, check dependent Stores
if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
InitializeNode* init = use->as_Allocate()->initialization();
if (init != nullptr) {
Node* imem = init->proj_out_or_null(TypeFunc::Memory);
if (imem != nullptr) add_users_to_worklist0(imem, worklist);
init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
}
}
// If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
@@ -2611,8 +2613,8 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
}
if (use_op == Op_Initialize) {
Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
if (imem != nullptr) add_users_to_worklist0(imem, worklist);
InitializeNode* init = use->as_Initialize();
init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
}
// Loading the java mirror from a Klass requires two loads and the type
// of the mirror load depends on the type of 'n'. See LoadNode::Value().

View File

@@ -380,17 +380,13 @@ void StringConcat::eliminate_initialize(InitializeNode* init) {
Compile* C = _stringopts->C;
// Eliminate Initialize node.
assert(init->outcnt() <= 2, "only a control and memory projection expected");
assert(init->req() <= InitializeNode::RawStores, "no pending inits");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != nullptr) {
C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
}
Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (mem_proj != nullptr) {
Node *mem = init->in(TypeFunc::Memory);
C->gvn_replace_by(mem_proj, mem);
}
Node* mem = init->in(TypeFunc::Memory);
init->replace_mem_projs_by(mem, C);
C->gvn_replace_by(init, C->top());
init->disconnect_inputs(C);
}

View File

@@ -675,7 +675,9 @@ public class ServerCompilerScheduler implements Scheduler {
}
private static boolean isProj(Node n) {
return hasName(n, "Proj") || hasName(n, "MachProj");
return hasName(n, "Proj") ||
hasName(n, "MachProj") ||
hasName(n, "NarrowMemProj");
}
private static boolean isParm(Node n) {

View File

@@ -53,6 +53,7 @@ split(and([matches("name", "Parm|MachProj"),
// Combine single-input nodes.
combine(anyNode, matches("name", "Proj|IfFalse|IfTrue|JProj|MachProj|JumpProj|CatchProj|Parm"));
combine(anyNode, matches("name", "NarrowMemProj"), ["N"]);
combine(anyNode, matches("name", "SCMemProj"), ["SCM"]);
combine(matches("name", "SubTypeCheck|Cmp.*"), matches("name", "Bool"), ["[dump_spec]"]);
combine(anyNode, matches("name", "Decode(N|NarrowPtr|NKlass)"), ["DC"]);

View File

@@ -48,9 +48,13 @@ public class TestIterativeEA {
System.out.println(analyzer.getOutput());
analyzer.shouldHaveExitValue(0);
analyzer.shouldContain("++++ Eliminated: 26 Allocate");
analyzer.shouldContain("++++ Eliminated: 48 Allocate");
analyzer.shouldContain("++++ Eliminated: 78 Allocate");
analyzer.shouldMatch(
"(?s)" + // Let .* also match line terminators.
"Eliminated: \\d+ Allocate" +
".*" +
"Eliminated: \\d+ Allocate" +
".*" +
"Eliminated: \\d+ Allocate");
}
static class A {

View File

@@ -0,0 +1,67 @@
/*
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8327963
* @summary C2: fix construction of memory graph around Initialize node to prevent incorrect execution if allocation is removed
* @run main/othervm -XX:-BackgroundCompilation compiler.macronodes.TestEarlyEliminationOfAllocationWithoutUse
* @run main/othervm compiler.macronodes.TestEarlyEliminationOfAllocationWithoutUse
*/
package compiler.macronodes;
import java.util.Arrays;
public class TestEarlyEliminationOfAllocationWithoutUse {
private static volatile int volatileField;
public static void main(String[] args) {
boolean[] allTrue = new boolean[3];
Arrays.fill(allTrue, true);
A a = new A();
boolean[] allFalse = new boolean[3];
for (int i = 0; i < 20_000; i++) {
a.field1 = 0;
test1(a, allTrue);
test1(a, allFalse);
if (a.field1 != 42) {
throw new RuntimeException("Lost Store");
}
}
}
private static void test1(A otherA, boolean[] flags) {
otherA.field1 = 42;
// Fully unrolled before EA
for (int i = 0; i < 3; i++) {
A a = new A(); // removed right after EA
if (flags[i]) {
break;
}
}
}
private static class A {
int field1;
}
}

View File

@@ -0,0 +1,338 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8327012 8327963
* @summary Reveals an issue where hook_memory_on_init links an array slice to the rawptr slice,
* so the array slice depends on the raw slice. When the Initialize MemBar then gets removed
* in expand_allocate_common, the raw slice sees that it no longer has any effect, looks
* through the MergeMem and takes the initial memory state. The linked array slice therefore
* also reverts to the initial state, even if there were stores on the array slice before the
* allocation. This leads to a corrupted memory graph and missing stores in the generated code.
*
* @run main/othervm -Xcomp -XX:-TieredCompilation
* -XX:CompileCommand=compileonly,compiler.macronodes.TestEliminationOfAllocationWithoutUse::test*
* compiler.macronodes.TestEliminationOfAllocationWithoutUse
* @run main/othervm -Xcomp
* -XX:CompileCommand=compileonly,compiler.macronodes.TestEliminationOfAllocationWithoutUse::test*
* compiler.macronodes.TestEliminationOfAllocationWithoutUse
*/
package compiler.macronodes;
public class TestEliminationOfAllocationWithoutUse {
static public void main(String[] args) {
int failures = 0;
failures += run1();
failures += run2();
failures += run3();
failures += run4();
failures += run5();
failures += run6();
failures += run7();
failures += run8();
failures += run9();
if (failures != 0) {
throw new RuntimeException("Had test failures: " + failures);
}
}
static public int run1() {
int size = 10;
double[] arr1 = new double[size];
double[] arr2 = new double[size];
test1(arr1, arr2);
double sum = 0;
for (int i = 0; i < arr1.length; ++i) {
sum += arr1[i] - arr2[i];
}
if (sum != (double)(size)) {
System.out.println("test1: wrong result: " + sum + " vs expected: " + size);
return 1;
}
return 0;
}
// Simplified from JDK-8327012 regression test.
public static void test1(double[] arr1, double[] arr2) {
for(int i = 0; i < arr1.length; ++i) {
// stores on double[] slice
arr1[i] = (double)(i + 2);
arr2[i] = (double)(i + 1);
// Allocation without use: but Initialize MemBar tangles the rawptr and double[] slices
double[] tmp = new double[100];
// When the Initialize MemBar is removed, the rawptr slice sees that there is no effect
// and takes the initial state. The double[] slice is hooked on to the rawptr slice, and
// also thinks it has the initial state, ignoring the double[] stores above.
}
}
static public int run2() {
int size = 10;
double[] arr1 = new double[size];
test2(arr1);
double sum = 0;
for(int i = 0; i < arr1.length; ++i) {
sum += arr1[i];
}
if (sum != (double)(size)) {
System.out.println("test2: wrong result: " + sum + " vs expected: " + size);
return 1;
}
return 0;
}
// Simplified from test1
public static void test2(double[] arr1) {
for(int i = 0; i < arr1.length; ++i) {
arr1[i] = 1;
double[] tmp = new double[100];
}
}
static public int run3() {
int size = 10;
int[] arr1 = new int[size];
test3(arr1);
int sum = 0;
for(int i = 0; i < arr1.length; ++i) {
sum += arr1[i];
}
if (sum != size) {
System.out.println("test3: wrong result: " + sum + " vs expected: " + size);
return 1;
}
return 0;
}
// Modified from test2
public static void test3(int[] arr1) {
for(int i = 0; i < arr1.length; ++i) {
arr1[i] = 1;
int[] tmp = new int[100];
}
}
// From TestIncorrectResult.java in JDK-8324739
static int test4(int l2) {
int[] tmp = new int[20];
for (int j = 0; j < l2; ++j) {
tmp[j] = 42;
int[] unused_but_necessary = new int[400];
}
return tmp[0];
}
public static int run4() {
for (int i = 0; i < 100; ++i) {
long res = test4(20);
if (res != 42) {
System.out.println("test4: wrong result: " + res + " vs expected: 42");
return 1;
}
}
return 0;
}
// From JDK-8336701
static class Test5 {
int[] b = new int[400];
static int[] staticArray = new int[400];
}
static void test5() {
long e;
for (e = 1; e < 9; ++e) {
Test5.staticArray[(int) e] -= e;
synchronized (new Test5()) { }
}
for (int f = 0; f < 10000; ++f) ;
}
static int run5() {
new Test5();
for (int i = 0; i < 1000; ++i) {
test5();
}
if (Test5.staticArray[8] != -8000) {
System.out.println("test5: wrong result: " + Test5.staticArray[8] + " vs expected: -8000");
return 1;
}
return 0;
}
// From JDK-8336293
static class Test6 {
static long c;
static int a = 400;
double[] b = new double[400];
}
static void test6() {
long d;
double[] e = new double[Test6.a];
for (int f = 0; f < e.length; f++)
e[f] = 1.116242;
d = 1;
while (++d < 7)
synchronized (new Test6()) { }
long g = 0;
for (int f = 0; f < e.length; f++)
g += e[f];
Test6.c += g;
}
static int run6() {
new Test6();
for (int f = 0; f < 10000; ++f) {
test6();
}
if (Test6.c != 4000000) {
System.out.println("test6: wrong result: " + Test6.c + " vs expected: 4000000 ");
return 1;
}
return 0;
}
// From JDK-8327868
static class Test7 {
static int a = 400;
int[] b = new int[400];
static int[] staticArray = new int[a];
}
static int test7() {
int l, d = 3;
for (l = 2; 58 > l; l++) {
for (int e = 2; e < 8; e += 2)
for (int f = 1; f < e; f += 2)
synchronized (new Test7()) {
}
do
; while (d < 2);
int g = 0;
do
g++;
while (g < 20000);
Test7.staticArray[1] -= 3023399;
}
int h = 0;
for (int i = 0; i < Test7.staticArray.length; i++)
h += Test7.staticArray[i];
return h;
}
static int run7() {
new Test7();
int res = test7();
if (res != -169310344) {
System.out.println("test7: wrong result: " + res + " vs expected: -169310344");
return 1;
}
return 0;
}
// from JDK-8329984
static class Test8 {
static int a = 400;
int[] e = new int[400];
}
static int test8() {
int i = 22738;
int b;
int h;
int[] c = new int[Test8.a];
for (b = 3; b < 273; b++) {
h = 1;
while (++h < 97) switch (b % 6 + 56) {
case 56:
c[1] = i;
case 57:
synchronized (new Test8()) {}
}
}
int k = 0;
for (int j = 0; j < c.length; j++) k += c[j];
return k;
}
public static int run8() {
new Test8();
for (int i = 0; i < 20; i++) {
int res = test8();
if (res != 22738) {
System.out.println("test8: wrong result: " + res + " vs expected: 22738");
return 1;
}
}
return 0;
}
// from JDK-8341009
static class Test9 {
static int a = 256;
float[] b = new float[256];
static long c;
}
static void test9() {
for (int f = 0; f < 10000; ++f) ;
float[][] g = new float[Test9.a][Test9.a];
for (int d = 7; d < 16; d++) {
long e = 1;
do {
g[d][(int) e] = d;
synchronized (new Test9()) {
}
} while (++e < 5);
}
for (int i = 0; i < Test9.a; ++i) {
for (int j = 0; j < Test9.a ; ++j) {
Test9.c += g[i][j];
}
}
}
static int run9() {
for (int j = 6; 116 > j; ++j) {
test9();
}
if (Test9.c != 43560) {
System.out.println("test9: wrong result: " + Test9.c + " vs expected: 43560");
return 1;
}
return 0;
}
}

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8327012 8327963
* @summary Test that initializing store gets captured, i.e. moved before the InitializeNode
* and made into a raw-store.
* @library /test/lib /
* @run driver compiler.macronodes.TestInitializingStoreCapturing
*/
package compiler.macronodes;
import compiler.lib.ir_framework.*;
public class TestInitializingStoreCapturing {
static class A {
float value;
A(float v) { value = v; }
};
static public void main(String[] args) {
TestFramework.run();
}
@Test
@IR(counts = {IRNode.STORE_F, "= 0"})
static A testInitializeField() {
return new A(4.2f);
}
@Test
@IR(counts = {IRNode.STORE_F, "= 0"})
static float[] testInitializeArray() {
return new float[] {4.2f};
}
}