8379790: MemNode::can_see_stored_value incorrectly walks past barriers

Reviewed-by: chagedorn, dfenacci
This commit is contained in:
Quan Anh Mai 2026-04-01 06:59:27 +00:00
parent 2e0ce34d3c
commit 3dcf2a3bac
2 changed files with 39 additions and 29 deletions

View File

@ -1156,40 +1156,38 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
return nullptr;
}
//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere. We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory alive twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
Node* LoadNode::can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
// This is more general than load from boxing objects.
if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
uint alias_idx = atp->index();
Node* result = nullptr;
Node* current = st;
// Skip through chains of MemBarNodes checking the MergeMems for
// new states for the slice of this load. Stop once any other
// kind of node is encountered. Loads from final memory can skip
// through any kind of MemBar but normal loads shouldn't skip
through MemBarAcquire since that could allow them to move out of
// a synchronized region. It is not safe to step over MemBarCPUOrder,
// because alias info above them may be inaccurate (e.g., due to
// mixed/mismatched unsafe accesses).
// Skip through chains of MemBarNodes checking the MergeMems for new states for the slice of
// this load. Stop once any other kind of node is encountered.
//
// In principle, folding a load is moving it up until it meets a matching store.
//
// store(ptr, v); store(ptr, v); store(ptr, v);
// membar1; -> membar1; -> load(ptr);
// membar2; load(ptr); membar1;
// load(ptr); membar2; membar2;
//
// So, we can decide which kinds of barriers we can walk past. It is not safe to step over
// MemBarCPUOrder, even if the memory is not rewritable, because alias info above them may be
// inaccurate (e.g., due to mixed/mismatched unsafe accesses).
bool is_final_mem = !atp->is_rewritable();
while (current->is_Proj()) {
int opc = current->in(0)->Opcode();
if ((is_final_mem && (opc == Op_MemBarAcquire ||
opc == Op_MemBarAcquireLock ||
opc == Op_LoadFence)) ||
if ((is_final_mem && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock || opc == Op_LoadFence)) ||
opc == Op_MemBarRelease ||
opc == Op_StoreFence ||
opc == Op_MemBarReleaseLock ||
@ -1216,6 +1214,17 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
}
}
return can_see_stored_value(st, phase);
}
// If st is a store to the same location as this, return the stored value
Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
// Loop around twice in the case Load -> Initialize -> Store.
// (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
for (int trip = 0; trip <= 1; trip++) {
@ -1344,7 +1353,7 @@ Node* LoadNode::Identity(PhaseGVN* phase) {
// If the previous store-maker is the right kind of Store, and the store is
// to the same address, then we are equal to the value stored.
Node* mem = in(Memory);
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if( value ) {
// byte, short & char stores truncate naturally.
// A load has to load the truncated value which requires
@ -2042,7 +2051,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// (c) See if we can fold up on the spot, but don't fold up here.
// Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
// just return a prior value, which is done by Identity calls.
if (can_see_stored_value(prev_mem, phase)) {
if (can_see_stored_value_through_membars(prev_mem, phase)) {
// Make ready for step (d):
set_req_X(MemNode::Memory, prev_mem, phase);
return this;
@ -2099,7 +2108,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
Compile* C = phase->C;
// If load can see a previous constant store, use that.
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type), "sanity");
return value->bottom_type();
@ -2350,7 +2359,7 @@ uint LoadNode::match_edge(uint idx) const {
//
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_BYTE, value, _type, phase, false);
if (narrow != value) {
@ -2363,7 +2372,7 @@ Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadBNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2384,7 +2393,7 @@ const Type* LoadBNode::Value(PhaseGVN* phase) const {
//
Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_BOOLEAN, value, _type, phase, false);
if (narrow != value) {
@ -2397,7 +2406,7 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadUBNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2418,7 +2427,7 @@ const Type* LoadUBNode::Value(PhaseGVN* phase) const {
//
Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_CHAR, value, _type, phase, false);
if (narrow != value) {
@ -2431,7 +2440,7 @@ Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadUSNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2452,7 +2461,7 @@ const Type* LoadUSNode::Value(PhaseGVN* phase) const {
//
Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_SHORT, value, _type, phase, false);
if (narrow != value) {
@ -2465,7 +2474,7 @@ Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadSNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,

View File

@ -266,6 +266,7 @@ protected:
const Type* const _type; // What kind of value is loaded?
virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
Node* can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const;
public:
LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)