8311125: Remove unused parameter 'phase' in AllocateNode::Ideal_allocation

Reviewed-by: chagedorn, kvn
This commit is contained in:
Xin Liu 2023-07-01 07:25:26 +00:00
parent 8c8e9d911d
commit d2e1159300
12 changed files with 43 additions and 43 deletions

View File

@ -94,7 +94,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
uint adr_idx) const {
intptr_t offset = 0;
Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
if (offset == Type::OffsetBot) {
return false; // cannot unalias unless there are precise offsets
@ -142,7 +142,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
if (st_base != base
&& MemNode::detect_ptr_independence(base, alloc, st_base,
AllocateNode::Ideal_allocation(st_base, phase),
AllocateNode::Ideal_allocation(st_base),
phase)) {
// Success: The bases are provably independent.
mem = mem->in(MemNode::Memory);
@ -307,7 +307,7 @@ bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
Node* adr) const {
intptr_t offset = 0;
Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
if (offset == Type::OffsetBot) {
return false; // cannot unalias unless there are precise offsets

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -101,7 +101,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseVal
BasicType bt, uint adr_idx) const {
intptr_t offset = 0;
Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
if (offset == Type::OffsetBot) {
return false; // cannot unalias unless there are precise offsets
@ -149,7 +149,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseVal
if (st_base != base
&& MemNode::detect_ptr_independence(base, alloc, st_base,
AllocateNode::Ideal_allocation(st_base, phase),
AllocateNode::Ideal_allocation(st_base),
phase)) {
// Success: The bases are provably independent.
mem = mem->in(MemNode::Memory);

View File

@ -922,7 +922,7 @@ public:
// (Note: This function is defined in file graphKit.cpp, near
// GraphKit::new_instance/new_array, whose output it recognizes.)
// The 'ptr' may not have an offset unless the 'offset' argument is given.
static AllocateNode* Ideal_allocation(Node* ptr, PhaseValues* phase);
static AllocateNode* Ideal_allocation(Node* ptr);
// Fancy version which uses AddPNode::Ideal_base_and_offset to strip
// an offset, which is reported back to the caller.
@ -932,7 +932,7 @@ public:
// Dig the klass operand out of a (possible) allocation site.
// Returns the KlassNode input of the AllocateNode feeding 'ptr', or
// nullptr when 'ptr' is not recognized as a freshly allocated object.
// NOTE: 'phase' is no longer consulted (Ideal_allocation dropped it in
// JDK-8311125); it is kept in the signature for caller compatibility.
static Node* Ideal_klass(Node* ptr, PhaseValues* phase) {
  AllocateNode* allo = Ideal_allocation(ptr);
  return (allo == nullptr) ? nullptr : allo->in(KlassNode);
}
@ -1001,8 +1001,8 @@ public:
// Pattern-match a possible usage of AllocateArrayNode.
// Return null if no array allocation is recognized: either 'ptr' does
// not feed from an AllocateNode at all, or the allocation found is not
// an AllocateArrayNode.
static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
  AllocateNode* allo = Ideal_allocation(ptr);
  return (allo == nullptr || !allo->is_AllocateArray())
         ? nullptr : allo->as_AllocateArray();
}

View File

@ -1205,7 +1205,7 @@ Node* GraphKit::load_object_klass(Node* obj) {
//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
// Special-case a fresh allocation to avoid building nodes:
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
Node *alen;
if (alloc == nullptr) {
Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
@ -3625,14 +3625,14 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
#ifdef ASSERT
{ // Verify that the AllocateNode::Ideal_allocation recognizers work:
assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
assert(AllocateNode::Ideal_allocation(rawoop) == alloc,
"Ideal_allocation works");
assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
assert(AllocateNode::Ideal_allocation(javaoop) == alloc,
"Ideal_allocation works");
if (alloc->is_AllocateArray()) {
assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
assert(AllocateArrayNode::Ideal_array_allocation(rawoop) == alloc->as_AllocateArray(),
"Ideal_allocation works");
assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
assert(AllocateArrayNode::Ideal_array_allocation(javaoop) == alloc->as_AllocateArray(),
"Ideal_allocation works");
} else {
assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
@ -3918,7 +3918,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseValues* phase) {
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr) {
if (ptr == nullptr) { // reduce dumb test in callers
return nullptr;
}
@ -3949,7 +3949,7 @@ AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseValues* phase,
intptr_t& offset) {
Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
if (base == nullptr) return nullptr;
return Ideal_allocation(base, phase);
return Ideal_allocation(base);
}
// Trace Initialize <- Proj[Parm] <- Allocate

View File

@ -4042,7 +4042,7 @@ bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
if (uninitialized) {
// Mark the allocation so that zeroing is skipped
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj, &_gvn);
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
alloc->maybe_set_complete(&_gvn);
}
}
@ -4756,7 +4756,7 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
if (ReduceBulkZeroing) {
// We will be completely responsible for initializing this object -
// mark Initialize node as complete.
alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
alloc = AllocateNode::Ideal_allocation(alloc_obj);
// The object was just allocated - there should be no any stores!
guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
// Mark as complete_with_arraycopy so that on AllocateNode
@ -5465,7 +5465,7 @@ LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
if (stopped()) return nullptr; // no fast path
if (!C->do_aliasing()) return nullptr; // no MergeMems around
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
if (alloc == nullptr) return nullptr;
Node* rawmem = memory(Compile::AliasIdxRaw);

View File

@ -1255,7 +1255,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
AllocateArrayNode* alloc = nullptr;
if (ac->is_alloc_tightly_coupled()) {
alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
alloc = AllocateArrayNode::Ideal_array_allocation(dest);
assert(alloc != nullptr, "expect alloc");
}
@ -1273,7 +1273,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
AllocateArrayNode* alloc = nullptr;
if (ac->is_alloc_tightly_coupled()) {
alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
alloc = AllocateArrayNode::Ideal_array_allocation(dest);
assert(alloc != nullptr, "expect alloc");
}

View File

@ -553,7 +553,7 @@ bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
// when searching stored value.
// Otherwise return null.
Node* LoadNode::find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
ArrayCopyNode* ac = find_array_copy_clone(phase, ld_alloc, mem);
ArrayCopyNode* ac = find_array_copy_clone(ld_alloc, mem);
if (ac != nullptr) {
Node* ld_addp = in(MemNode::Address);
Node* src = ac->in(ArrayCopyNode::Src);
@ -608,7 +608,7 @@ Node* LoadNode::find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node
return nullptr;
}
ArrayCopyNode* MemNode::find_array_copy_clone(PhaseValues* phase, Node* ld_alloc, Node* mem) const {
ArrayCopyNode* MemNode::find_array_copy_clone(Node* ld_alloc, Node* mem) const {
if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
if (ld_alloc != nullptr) {
@ -629,7 +629,7 @@ ArrayCopyNode* MemNode::find_array_copy_clone(PhaseValues* phase, Node* ld_alloc
}
if (ac != nullptr && ac->is_clonebasic()) {
AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest));
if (alloc != nullptr && alloc == ld_alloc) {
return ac;
}
@ -657,7 +657,7 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
Node* adr = in(MemNode::Address);
intptr_t offset = 0;
Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(base);
if (offset == Type::OffsetBot)
return nullptr; // cannot unalias unless there are precise offsets
@ -705,7 +705,7 @@ Node* MemNode::find_previous_store(PhaseValues* phase) {
if (st_base != base &&
detect_ptr_independence(base, alloc,
st_base,
AllocateNode::Ideal_allocation(st_base, phase),
AllocateNode::Ideal_allocation(st_base),
phase)) {
// Success: The bases are provably independent.
mem = mem->in(MemNode::Memory);
@ -1058,7 +1058,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base, phase);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
// This is more general than load from boxing objects.
@ -1160,7 +1160,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
// can create new nodes. Think of it as lazily manifesting
// virtually pre-existing constants.)
if (memory_type() != T_VOID) {
if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == nullptr) {
if (ReduceBulkZeroing || find_array_copy_clone(ld_alloc, in(MemNode::Memory)) == nullptr) {
// If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an
// ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done
// by the ArrayCopyNode.
@ -1713,10 +1713,10 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) {
return phi;
}
AllocateNode* LoadNode::is_new_object_mark_load(PhaseGVN *phase) const {
AllocateNode* LoadNode::is_new_object_mark_load() const {
if (Opcode() == Op_LoadX) {
Node* address = in(MemNode::Address);
AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase);
AllocateNode* alloc = AllocateNode::Ideal_allocation(address);
Node* mem = in(MemNode::Memory);
if (alloc != nullptr && mem->is_Proj() &&
mem->in(0) != nullptr &&
@ -2130,7 +2130,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
}
}
Node* alloc = is_new_object_mark_load(phase);
Node* alloc = is_new_object_mark_load();
if (alloc != nullptr) {
return TypeX::make(markWord::prototype().value());
}
@ -2517,7 +2517,7 @@ Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// We can fetch the length directly through an AllocateArrayNode.
// This works even if the length is not constant (clone or newArray).
if (offset == arrayOopDesc::length_offset_in_bytes()) {
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base);
if (alloc != nullptr) {
Node* allocated_length = alloc->Ideal_length();
Node* len = alloc->make_ideal_length(tary, phase);
@ -2549,7 +2549,7 @@ Node* LoadRangeNode::Identity(PhaseGVN* phase) {
// We can fetch the length directly through an AllocateArrayNode.
// This works even if the length is not constant (clone or newArray).
if (offset == arrayOopDesc::length_offset_in_bytes()) {
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base);
if (alloc != nullptr) {
Node* allocated_length = alloc->Ideal_length();
// Do not allow make_ideal_length to allocate a CastII node.
@ -3360,7 +3360,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
} else if (opc == Op_MemBarRelease) {
// Final field stores.
Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent));
if ((alloc != nullptr) && alloc->is_Allocate() &&
alloc->as_Allocate()->does_not_escape_thread()) {
// The allocated object does not escape.

View File

@ -93,7 +93,7 @@ protected:
}
virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
ArrayCopyNode* find_array_copy_clone(PhaseValues* phase, Node* ld_alloc, Node* mem) const;
ArrayCopyNode* find_array_copy_clone(Node* ld_alloc, Node* mem) const;
static bool check_if_adr_maybe_raw(Node* adr);
public:
@ -199,7 +199,7 @@ private:
// this field.
const MemOrd _mo;
AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const;
AllocateNode* is_new_object_mark_load() const;
protected:
virtual bool cmp(const Node &n) const;

View File

@ -1011,7 +1011,7 @@ void Parse::do_exits() {
// and allocation node does not escape the initialize method,
// then barrier introduced by allocation node can be removed.
if (DoEscapeAnalysis && alloc_with_final()) {
AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
alloc->compute_MemBar_redundancy(method());
}
if (PrintOpto && (Verbose || WizardMode)) {

View File

@ -242,7 +242,7 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// Any method can write a @Stable field; insert memory barriers after those also.
if (field->is_final()) {
set_wrote_final(true);
if (AllocateNode::Ideal_allocation(obj, &_gvn) != nullptr) {
if (AllocateNode::Ideal_allocation(obj) != nullptr) {
// Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor.
// Can't bind stable with its allocation, only record allocation for final field.

View File

@ -1681,7 +1681,7 @@ Node* PhaseStringOpts::allocate_byte_array(GraphKit& kit, IdealKit* ideal, Node*
// Mark the allocation so that zeroing is skipped since the code
// below will overwrite the entire array
AllocateArrayNode* byte_alloc = AllocateArrayNode::Ideal_array_allocation(byte_array, _gvn);
AllocateArrayNode* byte_alloc = AllocateArrayNode::Ideal_array_allocation(byte_array);
byte_alloc->maybe_set_complete(_gvn);
if (ideal != nullptr) {
@ -2009,7 +2009,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
// The value field is final. Emit a barrier here to ensure that the effect
// of the initialization is committed to memory before any code publishes
// a reference to the newly constructed object (see Parse::do_exits()).
assert(AllocateNode::Ideal_allocation(result, _gvn) != nullptr, "should be newly allocated");
assert(AllocateNode::Ideal_allocation(result) != nullptr, "should be newly allocated");
kit.insert_mem_bar(Op_MemBarRelease, result);
} else {
result = C->top();

View File

@ -988,8 +988,8 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
if (p0 && p1) {
Node* in1 = in(1)->uncast();
Node* in2 = in(2)->uncast();
AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1, nullptr);
AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2, nullptr);
AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1);
AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2);
if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) {
return TypeInt::CC_GT; // different pointers
}