Mirror of https://github.com/openjdk/jdk.git

Refactor the logic in MemNode::find_previous_store

commit 4ce0cf4f3e
parent 6f6966b28b
@@ -552,6 +552,15 @@ Node::DomResult MemNode::maybe_all_controls_dominate(Node* dom, Node* sub) {
 bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
                                       Node* p2, AllocateNode* a2,
                                       PhaseTransform* phase) {
+  // Trivial case: Non-overlapping values, be careful, we can cast a raw pointer to an oop so
+  // joining the types only works if both are oops
+  const Type* p1_type = p1->bottom_type();
+  const Type* p2_type = p2->bottom_type();
+  const Type* join = p1_type->join(p2_type);
+  if (p1_type->isa_oopptr() && p2_type->isa_oopptr() && join->empty()) {
+    return true;
+  }
+
   // Attempt to prove that these two pointers cannot be aliased.
   // They may both manifestly be allocations, and they should differ.
   // Or, if they are not both allocations, they can be distinct constants.
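The trivial case added above rests on a lattice argument: if the join of two oop types is empty, no runtime object can satisfy both static types at once, so the two pointers can never be equal. A standalone sketch of that argument (not HotSpot code; the toy "type" below is just the set of possible concrete classes, and join is set intersection):

    #include <cassert>
    #include <set>
    #include <string>

    using TypeSet = std::set<std::string>;  // possible concrete classes of a pointer

    // Analogue of p1_type->join(p2_type): only classes allowed by both types remain.
    TypeSet join(const TypeSet& a, const TypeSet& b) {
      TypeSet out;
      for (const auto& k : a) {
        if (b.count(k) != 0) {
          out.insert(k);
        }
      }
      return out;
    }

    int main() {
      TypeSet c1 = {"C1"};        // pointer statically typed as leaf class C1
      TypeSet c2 = {"C2"};        // pointer statically typed as unrelated leaf class C2
      TypeSet p  = {"C1", "C2"};  // pointer typed as their common parent P

      assert(join(c1, c2).empty());  // empty join: a C1 and a C2 pointer can never alias
      assert(!join(c1, p).empty());  // a C1 pointer may alias a P pointer
      return 0;
    }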
@@ -690,6 +699,25 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
   AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
 
+  const TypePtr* adr_type = this->adr_type();
+  if (adr_type == nullptr) {
+    // This means the access is dead
+    return phase->C->top();
+  } else if (adr_type->base() == TypePtr::AnyPtr) {
+    // Compile::get_alias_index will complain with these accesses
+    if (adr_type->ptr() == TypePtr::Null) {
+      // Access to null cannot happen, this means the access must be in a dead path
+      return phase->C->top();
+    } else {
+      // Give up on a very wide access
+      return nullptr;
+    }
+  }
+
+  int alias_idx = phase->C->get_alias_index(adr_type);
+  assert(alias_idx != Compile::AliasIdxTop, "must not be a dead node");
+  assert(alias_idx != Compile::AliasIdxBot || !phase->C->do_aliasing(), "must not be a very wide access");
+
   if (offset == Type::OffsetBot)
     return nullptr; // cannot unalias unless there are precise offsets
 
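With this hunk, find_previous_store has three possible answers instead of two: an exact previous store, nullptr ("cannot tell, stop optimizing"), or top ("the access itself is dead"). A hedged sketch of the new early classification of the address type; the enum and field names are hypothetical, only the three-way split mirrors the code above:

    #include <cassert>

    enum class AdrClass { Dead, TooWide, Analyzable };

    struct AdrType {
      bool is_any_ptr;     // models adr_type->base() == TypePtr::AnyPtr
      bool is_null_type;   // models adr_type->ptr() == TypePtr::Null
    };

    AdrClass classify(const AdrType* adr_type) {
      if (adr_type == nullptr) {
        return AdrClass::Dead;       // no address type: the access is dead, fold to top
      }
      if (adr_type->is_any_ptr) {
        if (adr_type->is_null_type) {
          return AdrClass::Dead;     // an access to null only happens on a dead path
        }
        return AdrClass::TooWide;    // very wide access: give up and return nullptr
      }
      return AdrClass::Analyzable;   // precise enough to compute an alias index
    }

    int main() {
      assert(classify(nullptr) == AdrClass::Dead);
      AdrType null_adr{true, true};
      assert(classify(&null_adr) == AdrClass::Dead);
      AdrType wide{true, false};
      assert(classify(&wide) == AdrClass::TooWide);
      AdrType normal{false, false};
      assert(classify(&normal) == AdrClass::Analyzable);
      return 0;
    }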
@@ -709,15 +737,31 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
       Node* st_adr = mem->in(MemNode::Address);
       intptr_t st_offset = 0;
       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
-      if (st_base == nullptr)
-        break;              // inscrutable pointer
-
-      // For raw accesses it's not enough to prove that constant offsets don't intersect.
-      // We need the bases to be the equal in order for the offset check to make sense.
-      if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
+      if (st_base == nullptr) {
+        // inscrutable pointer
+        break;
+      }
+
+      // If the bases are the same and the offsets are the same, it seems that this is the exact
+      // store we are looking for, the caller will check if the type of the store matches
+      if (st_base == base && st_offset == offset) {
+        return mem;
+      }
+
+      // If it is provable that the memory accessed by mem does not overlap the memory accessed by
+      // this, we may walk past mem.
+      // For raw accesses, 2 accesses are independent if they have the same base and the offsets
+      // say that they do not overlap.
+      // For heap accesses, 2 accesses are independent if either the bases are provably different
+      // at runtime or the offsets say that the accesses do not overlap.
+      if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
+        // Raw accesses can only be provably independent if they have the same base
         break;
       }
 
+      // If the offsets say that the accesses do not overlap, then it is provable that mem and this
+      // do not overlap. For example, a LoadI from Object+8 is independent from a StoreL into
+      // Object+12, no matter what the bases are.
       if (st_offset != offset && st_offset != Type::OffsetBot) {
         const int MAX_STORE = MAX2(BytesPerLong, (int)MaxVectorSize);
         assert(mem->as_Store()->memory_size() <= MAX_STORE, "");
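The offset rule in this hunk is plain interval disjointness. A minimal standalone check (not HotSpot code, which additionally bounds store sizes by MAX_STORE and is more conservative about unknown offsets):

    #include <cassert>
    #include <cstdint>

    // Half-open byte ranges [off, off + size) intersect iff each one starts
    // before the other one ends.
    bool ranges_overlap(intptr_t off1, int size1, intptr_t off2, int size2) {
      return off1 < off2 + size2 && off2 < off1 + size1;
    }

    int main() {
      // The example from the comment above: a 4-byte LoadI at offset 8 covers
      // [8, 12) and an 8-byte StoreL at offset 12 covers [12, 20): disjoint.
      assert(!ranges_overlap(8, 4, 12, 8));
      // Move the store down to offset 4 and [8, 12) vs [4, 12) do overlap.
      assert(ranges_overlap(8, 4, 4, 8));
      return 0;
    }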
@@ -730,25 +774,27 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
         // in the same sequence of RawMem effects. We sometimes initialize
         // a whole 'tile' of array elements with a single jint or jlong.)
         mem = mem->in(MemNode::Memory);
-        continue;           // (a) advance through independent store memory
+        continue;
         }
       }
-      if (st_base != base &&
-          detect_ptr_independence(base, alloc,
-                                  st_base,
-                                  AllocateNode::Ideal_allocation(st_base),
-                                  phase)) {
-        // Success: The bases are provably independent.
+
+      // Same base and overlapping offsets, it seems provable that the accesses overlap, give up
+      if (st_base == base) {
+        break;
+      }
+
+      // Try to prove that 2 different base nodes at compile time are different values at runtime
+      bool known_independent = false;
+      if (detect_ptr_independence(base, alloc, st_base, AllocateNode::Ideal_allocation(st_base), phase)) {
+        // detect_ptr_independence == true means that it can prove that base and st_base cannot
+        // have the same runtime value
+        known_independent = true;
+      }
+
+      if (known_independent) {
         mem = mem->in(MemNode::Memory);
-        continue;           // (a) advance through independent store memory
+        continue;
       }
-
-      // (b) At this point, if the bases or offsets do not agree, we lose,
-      // since we have not managed to prove 'this' and 'mem' independent.
-      if (st_base == base && st_offset == offset) {
-        return mem;         // let caller handle steps (c), (d)
-      }
-
     } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
       InitializeNode* st_init = mem->in(0)->as_Initialize();
       AllocateNode* st_alloc = st_init->allocation();
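Taken together, the last two hunks give the walk its shape: stop on an exact match, give up on a possible overlap, and step past stores proved independent. A standalone model of that loop (not C2 code): here base is an allocation id, so distinct ids stand for provably distinct objects, which is the fact detect_ptr_independence establishes; real C2 additionally restricts the offset rule for raw pointers to same-base accesses.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Store {
      int      base;     // id of the allocation written through
      intptr_t offset;   // byte offset within the object
      int      size;     // bytes written
    };

    // Walk the store chain backwards: return the index of the exact previous
    // store for (base, offset, size), or -1 if the walk has to give up.
    int find_previous_store(const std::vector<Store>& chain, int base, intptr_t offset, int size) {
      for (int i = (int)chain.size() - 1; i >= 0; i--) {
        const Store& st = chain[i];
        if (st.base == base && st.offset == offset) {
          return i;  // exact match; the caller still checks the stored type
        }
        bool disjoint = st.offset + st.size <= offset || offset + size <= st.offset;
        if (st.base != base || disjoint) {
          continue;  // provably independent store, walk past it
        }
        return -1;   // same object, possibly overlapping: give up
      }
      return -1;
    }

    int main() {
      // Three stores: object 1 at offset 8, object 2 at offset 8, object 1 again.
      std::vector<Store> chain = {{1, 8, 4}, {2, 8, 4}, {1, 8, 4}};
      assert(find_previous_store(chain, 1, 8, 4) == 2);  // last store matches directly
      assert(find_previous_store(chain, 2, 8, 4) == 1);  // walks past the object-1 store
      return 0;
    }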
@@ -769,7 +815,6 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
         // The bases are provably independent: Either they are
         // manifestly distinct allocations, or else the control
         // of this load dominates the store's allocation.
-        int alias_idx = phase->C->get_alias_index(adr_type());
         if (alias_idx == Compile::AliasIdxRaw) {
           mem = st_alloc->in(TypeFunc::Memory);
         } else {
@@ -792,6 +837,9 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
       }
       // Found an arraycopy that may affect that load
       return mem;
+    } else if (mem->is_MergeMem()) {
+      mem = mem->as_MergeMem()->memory_at(alias_idx);
+      continue;
     } else if (addr_t != nullptr && addr_t->is_known_instance_field()) {
       // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
       if (mem->is_Proj() && mem->in(0)->is_Call()) {
@@ -817,10 +865,6 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
         // we are looking for.
         return mem;
       }
-    } else if (mem->is_MergeMem()) {
-      int alias_idx = phase->C->get_alias_index(adr_type());
-      mem = mem->as_MergeMem()->memory_at(alias_idx);
-      continue;           // (a) advance through independent MergeMem memory
     }
   }
 
@@ -1851,7 +1895,6 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 
   Node* ctrl = in(MemNode::Control);
   Node* address = in(MemNode::Address);
-  bool progress = false;
 
   bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
                     phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
@@ -1864,7 +1907,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       (depends_only_on_test() || has_unknown_control_dependency())) {
     ctrl = ctrl->in(0);
     set_req(MemNode::Control,ctrl);
-    progress = true;
+    return this;
   }
 
   intptr_t ignore = 0;
@@ -1878,7 +1921,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
       set_req(MemNode::Control, nullptr);
-      progress = true;
+      return this;
     }
   }
 
@@ -1951,6 +1994,10 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   // the alias index stuff. So instead, peek through Stores and IFF we can
   // fold up, do so.
   Node* prev_mem = find_previous_store(phase);
+  if (prev_mem != nullptr && prev_mem->is_top()) {
+    // find_previous_store returns top when the access is dead
+    return prev_mem;
+  }
   if (prev_mem != nullptr) {
     Node* value = can_see_arraycopy_value(prev_mem, phase);
     if (value != nullptr) {
@@ -1969,7 +2016,11 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     }
   }
 
-  return progress ? this : nullptr;
+  if (!can_reshape) {
+    phase->record_for_igvn(this);
+  }
+
+  return nullptr;
 }
 
 // Helper to recognize certain Klass fields which are invariant across
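The LoadNode::Ideal hunks above all serve one change: Ideal no longer accumulates a progress flag but returns this immediately at each change site, so the final return is always nullptr; when can_reshape is false the node is queued for the later IGVN pass instead. A minimal sketch of that convention with hypothetical stand-in types (C2's real Ideal contract is "return this or a new node on change, nullptr otherwise"):

    #include <cassert>
    #include <vector>

    struct Node;

    struct Phase {
      std::vector<Node*> igvn_worklist;
      void record_for_igvn(Node* n) { igvn_worklist.push_back(n); }
    };

    struct Node {
      bool simplified = false;

      // Return this if the node changed, nullptr if not. If we cannot reshape
      // now, remember the node so the IGVN pass revisits it later.
      Node* Ideal(Phase* phase, bool can_reshape) {
        if (!simplified) {
          simplified = true;   // stand-in for one of the early-return edits above
          return this;
        }
        if (!can_reshape) {
          phase->record_for_igvn(this);
        }
        return nullptr;
      }
    };

    int main() {
      Phase phase;
      Node n;
      assert(n.Ideal(&phase, false) == &n);       // first call makes progress
      assert(n.Ideal(&phase, false) == nullptr);  // no further progress, queued for IGVN
      assert(phase.igvn_worklist.size() == 1);
      return 0;
    }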
@@ -3552,18 +3603,19 @@ Node* StoreNode::Identity(PhaseGVN* phase) {
     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
       result = mem;
     }
   }
 
-  if (result == this) {
-    // the store may also apply to zero-bits in an earlier object
-    Node* prev_mem = find_previous_store(phase);
-    // Steps (a), (b): Walk past independent stores to find an exact match.
-    if (prev_mem != nullptr) {
-      Node* prev_val = can_see_stored_value(prev_mem, phase);
-      if (prev_val != nullptr && prev_val == val) {
-        // prev_val and val might differ by a cast; it would be good
-        // to keep the more informative of the two.
-        result = mem;
-      }
+  // Store the same value that is in the memory, we can elide the store
+  if (result == this && is_unordered()) {
+    Node* prev_mem = find_previous_store(phase);
+    if (prev_mem != nullptr) {
+      if (prev_mem->is_top()) {
+        // find_previous_store returns top when the access is dead
+        return prev_mem;
+      }
+      Node* prev_val = can_see_stored_value(prev_mem, phase);
+      if (prev_val == val) {
+        result = mem;
+      }
     }
   }
 
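The new StoreNode::Identity path elides a store only when two conditions hold: the store is unordered (no memory-ordering obligations, hence the is_unordered() guard) and it writes exactly the value the previous store to the same location wrote. A hedged sketch of that rule; the names are illustrative, not HotSpot's:

    #include <cassert>

    struct StoreOp {
      int  value;
      bool unordered;  // a release/volatile store must stay even if redundant
    };

    // prev_val is the value the previous store to the same slot wrote.
    bool can_elide(const StoreOp& st, int prev_val) {
      return st.unordered && st.value == prev_val;
    }

    int main() {
      assert(can_elide({42, true}, 42));    // same value, plain store: elide it
      assert(!can_elide({42, false}, 42));  // ordered store: must keep it
      assert(!can_elide({7, true}, 42));    // different value: must keep it
      return 0;
    }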
test/hotspot/jtreg/compiler/c2/gvn/TestFindStore.java (new file, 71 lines)
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package compiler.c2.gvn;
+
+import compiler.lib.ir_framework.*;
+import jdk.test.lib.Asserts;
+
+/*
+ * @test
+ * @bug 8360192
+ * @summary Tests that loads and stores are optimized across provably independent stores.
+ * @library /test/lib /
+ * @run driver ${test.main.class}
+ */
+public class TestFindStore {
+    static class P {
+        int v;
+    }
+
+    static class C1 extends P {}
+    static class C2 extends P {}
+
+    public static void main(String[] args) {
+        TestFramework.run();
+    }
+
+    @Run(test = {"testLoad", "testStore"})
+    public void run() {
+        C1 c1 = new C1();
+        C2 c2 = new C2();
+
+        Asserts.assertEQ(1, testLoad(c1, c2, 1, 0));
+        testStore(c1, c2, 1, 0);
+    }
+
+    @Test
+    @IR(failOn = IRNode.LOAD)
+    static int testLoad(C1 c1, C2 c2, int v1, int v2) {
+        c1.v = v1;
+        c2.v = v2;
+        return c1.v;
+    }
+
+    @Test
+    @IR(counts = {IRNode.STORE, "2"})
+    static void testStore(C1 c1, C2 c2, int v1, int v2) {
+        c1.v = v1;
+        c2.v = v2;
+        c1.v = v1;
+    }
+}
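The two IR tests target both halves of the change: testLoad folds its load because C1 and C2 are unrelated leaf classes, so the join of their types is empty and detect_ptr_independence's new trivial case proves c2.v cannot alias c1.v; testStore keeps only two stores because the third one writes the value the first already wrote, and the refactored find_previous_store walks past the independent c2.v store so StoreNode::Identity can elide it.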