8373343: C2: verify AddP base input only set for heap addresses
Reviewed-by: dlong, chagedorn, qamai
commit 6e9256cb61
parent e8eb218ca2
@@ -771,7 +771,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi
   // this will require extensive changes to the loop optimization in order to
   // prevent a degradation of the optimization.
   // See comment in memnode.hpp, around line 227 in class LoadPNode.
-  Node* tlab_end = macro->make_load(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
+  Node* tlab_end = macro->make_load_raw(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);

   // Load the TLAB top.
   Node* old_tlab_top = new LoadPNode(toobig_false, mem, tlab_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
@@ -226,8 +226,11 @@ public:
          Base,     // Base oop, for GC purposes
          Address,  // Actually address, derived from base
          Offset } ; // Offset added to address
-  AddPNode( Node *base, Node *ptr, Node *off ) : Node(nullptr,base,ptr,off) {
+  AddPNode(Node *base, Node *ptr, Node *off) : Node(nullptr,base,ptr,off) {
     init_class_id(Class_AddP);
+    assert((ptr->bottom_type() == Type::TOP) ||
+           ((base == Compile::current()->top()) == (ptr->bottom_type()->make_ptr()->isa_oopptr() == nullptr)),
+           "base input only needed for heap addresses");
   }
   virtual int Opcode() const;
   virtual Node* Identity(PhaseGVN* phase);
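The assert added to the constructor states the invariant the rest of this change establishes: the Base input carries a real oop only when the pointer being formed can reference the Java heap, so the GC can relocate the derived address together with its base; for raw, klass, or thread-local addresses there is no oop to keep alive and Base must be top. A minimal sketch of the two construction patterns, assuming the usual C2 context (a PhaseGVN gvn, the current Compile C, and hypothetical oop_node/klass_node/field_offset inputs):

    // Heap address: the GC must see the base oop, so Base is the oop itself.
    Node* field_adr = gvn.transform(new AddPNode(oop_node, oop_node, gvn.MakeConX(field_offset)));

    // Off-heap (metadata) address: nothing for the GC to track, so Base is top.
    Node* sco_adr = gvn.transform(new AddPNode(C->top(), klass_node,
                                               gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));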
@@ -1737,11 +1737,11 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
     _is_allocation_MemBar_redundant = true;
   }
 }
-Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
+Node *AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
   Node* mark_node = nullptr;
   if (UseCompactObjectHeaders) {
     Node* klass_node = in(AllocateNode::KlassNode);
-    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
+    Node* proto_adr = phase->transform(new AddPNode(phase->C->top(), klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
   } else {
     // For now only enable fast locking for non-array types
@@ -1101,7 +1101,7 @@ public:
   void compute_MemBar_redundancy(ciMethod* initializer);
   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

-  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
+  Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);

   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
 };
@@ -3772,9 +3772,9 @@ Node* ConnectionGraph::get_addp_base(Node *addp) {
   //     |   |
   //     AddP  ( base == address )
   //
-  // case #6. Constant Pool, ThreadLocal, CastX2P or
+  // case #6. Constant Pool, ThreadLocal, CastX2P, Klass, OSR buffer buf or
   //          Raw object's field reference:
-  //      {ConP, ThreadLocal, CastX2P, raw Load}
+  //      {ConP, ThreadLocal, CastX2P, raw Load, Parm0}
   //  top   |
   //     \  |
   //     AddP  ( base == top )
@@ -3816,7 +3816,9 @@ Node* ConnectionGraph::get_addp_base(Node *addp) {
       int opcode = uncast_base->Opcode();
       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
+             (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms) ||
              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
+             (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) ||
              is_captured_store_address(addp), "sanity");
     }
   }
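The two new arms of this sanity assert match the new producers of top-based AddPs introduced elsewhere in the change: loads of klass pointers, and, in OSR compilations, the OSR buffer, which arrives as the first incoming parameter. A hedged sketch of the OSR-buffer test in isolation, using the same predicates as the assert (the helper name is hypothetical):

    // The OSR temp buffer is a raw C-heap buffer passed as the first parameter
    // of an OSR compilation; it is not an oop, so AddPs over it use a top base.
    static bool is_osr_buffer(Compile* C, Node* n) {
      return C->is_osr_compilation() && n->is_Parm() &&
             n->as_Parm()->_con == TypeFunc::Parms;
    }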
@@ -4411,7 +4413,6 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
   uint new_index_start = (uint) _compile->num_alias_types();
   VectorSet visited;
   ideal_nodes.clear(); // Reset for use with set_map/get_map.
-  uint unique_old = _compile->unique();

   //  Phase 1:  Process possible allocations from alloc_worklist.
   //  Create instance types for the CheckCastPP for allocations where possible.
@@ -2743,7 +2743,7 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No
   // will always succeed. We could leave a dependency behind to ensure this.

   // First load the super-klass's check-offset
-  Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
+  Node *p1 = gvn.transform(new AddPNode(C->top(), superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
   Node* m = C->immutable_memory();
   Node *chk_off = gvn.transform(new LoadINode(nullptr, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
@@ -2761,7 +2761,7 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No
 #ifdef _LP64
   chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X));
 #endif
-  Node *p2 = gvn.transform(new AddPNode(subklass,subklass,chk_off_X));
+  Node* p2 = gvn.transform(new AddPNode(C->top(), subklass, chk_off_X));
   // For some types like interfaces the following loadKlass is from a 1-word
   // cache which is mutable so can't use immutable memory. Other
   // types load from the super-class display table which is immutable.
@@ -3598,7 +3598,7 @@ Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
     }
   }
   constant_value = Klass::_lh_neutral_value; // put in a known value
-  Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
+  Node* lhp = basic_plus_adr(top(), klass_node, in_bytes(Klass::layout_helper_offset()));
   return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
 }

@@ -3000,7 +3000,7 @@ bool LibraryCallKit::inline_unsafe_allocate() {
   // Note: The argument might still be an illegal value like
   // Serializable.class or Object[].class. The runtime will handle it.
   // But we must make an explicit check for initialization.
-  Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
+  Node* insp = basic_plus_adr(top(), kls, in_bytes(InstanceKlass::init_state_offset()));
   // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
   // can generate code to load it as unsigned byte.
   Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::acquire);
@@ -3048,7 +3048,7 @@ bool LibraryCallKit::inline_native_vthread_start_transition(address funcAddr, co
   IdealKit ideal(this);

   Node* thread = ideal.thread();
-  Node* jt_addr = basic_plus_adr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
+  Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
   Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());
   access_store_at(nullptr, jt_addr, _gvn.type(jt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
   access_store_at(nullptr, vt_addr, _gvn.type(vt_addr)->is_ptr(), ideal.ConI(1), TypeInt::BOOL, T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
@@ -3089,7 +3089,7 @@ bool LibraryCallKit::inline_native_vthread_end_transition(address funcAddr, cons
     ideal.sync_kit(this);
   } ideal.else_(); {
     Node* thread = ideal.thread();
-    Node* jt_addr = basic_plus_adr(thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
+    Node* jt_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_in_vthread_transition_offset()));
     Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_vthread_transition_offset());

     sync_kit(ideal);
@@ -3115,7 +3115,7 @@ bool LibraryCallKit::inline_native_notify_jvmti_sync() {
   // unconditionally update the is_disable_suspend bit in current JavaThread
   Node* thread = ideal.thread();
   Node* arg = _gvn.transform(argument(0)); // argument for notification
-  Node* addr = basic_plus_adr(thread, in_bytes(JavaThread::is_disable_suspend_offset()));
+  Node* addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::is_disable_suspend_offset()));
   const TypePtr *addr_type = _gvn.type(addr)->isa_ptr();

   sync_kit(ideal);
@@ -3689,7 +3689,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
   IfNode* iff_thread_not_equal_carrierThread =
     create_and_map_if(control(), test_thread_not_equal_carrierThread, PROB_FAIR, COUNT_UNKNOWN);

-  Node* vthread_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR));
+  Node* vthread_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_OFFSET_JFR));

   // False branch, is carrierThread.
   Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread));
@@ -3714,7 +3714,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
   Node* tid = load_field_from_object(thread, "tid", "J");

   // Store the vthread tid to the jfr thread local.
-  Node* thread_id_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
+  Node* thread_id_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
   Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);

   // Branch is_excluded to conditionalize updating the epoch .
@@ -3736,7 +3736,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
   Node* epoch = _gvn.transform(new AndINode(epoch_raw, _gvn.transform(epoch_mask)));

   // Store the vthread epoch to the jfr thread local.
-  Node* vthread_epoch_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
+  Node* vthread_epoch_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
   Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true);

   RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT);
@@ -3759,7 +3759,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
   set_all_memory(excluded_mem);

   // Store the vthread exclusion state to the jfr thread local.
-  Node* thread_local_excluded_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
+  Node* thread_local_excluded_offset = basic_plus_adr(top(), jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
   store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true);

   // Store release
@@ -3814,7 +3814,7 @@ bool LibraryCallKit::inline_native_setCurrentThread() {

   // Change the _monitor_owner_id of the JavaThread
   Node* tid = load_field_from_object(arr, "tid", "J");
-  Node* monitor_owner_id_offset = basic_plus_adr(thread, in_bytes(JavaThread::monitor_owner_id_offset()));
+  Node* monitor_owner_id_offset = basic_plus_adr(top(), thread, in_bytes(JavaThread::monitor_owner_id_offset()));
   store_to_memory(control(), monitor_owner_id_offset, tid, T_LONG, MemNode::unordered, true);

   JFR_ONLY(extend_setCurrentThread(thread, arr);)
@@ -3956,7 +3956,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) {
 //---------------------------load_mirror_from_klass----------------------------
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
-  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
+  Node* p = basic_plus_adr(top(), klass, in_bytes(Klass::java_mirror_offset()));
   Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
   // mirror = ((OopHandle)mirror)->resolve();
   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
@@ -3996,7 +3996,7 @@ Node* LibraryCallKit::generate_klass_flags_guard(Node* kls, int modifier_mask, i
                                                  ByteSize offset, const Type* type, BasicType bt) {
   // Branch around if the given klass has the given modifier bit set.
   // Like generate_guard, adds a new path onto the region.
-  Node* modp = basic_plus_adr(kls, in_bytes(offset));
+  Node* modp = basic_plus_adr(top(), kls, in_bytes(offset));
   Node* mods = make_load(nullptr, modp, type, bt, MemNode::unordered);
   Node* mask = intcon(modifier_mask);
   Node* bits = intcon(modifier_bits);
@@ -4130,7 +4130,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
       phi->add_req(null());
     }
     // If we fall through, it's a plain class. Get its _super.
-    p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
+    p = basic_plus_adr(top(), kls, in_bytes(Klass::super_offset()));
     kls = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
     null_ctl = top();
     kls = null_check_oop(kls, &null_ctl);
@@ -4668,7 +4668,7 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
   int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                      vtable_index*vtableEntry::size_in_bytes() +
                      in_bytes(vtableEntry::method_offset());
-  Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
+  Node* entry_addr = basic_plus_adr(top(), obj_klass, entry_offset);
   Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);

   // Compare the target method with the expected method (e.g., Object.hashCode).
@@ -1198,8 +1198,8 @@ bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
 }


-Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
-  Node* adr = basic_plus_adr(base, offset);
+Node* PhaseMacroExpand::make_load_raw(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
+  Node* adr = basic_plus_adr(top(), base, offset);
   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
   Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
   transform_later(value);
@@ -1207,8 +1207,8 @@ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset,
 }


-Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
-  Node* adr = basic_plus_adr(base, offset);
+Node* PhaseMacroExpand::make_store_raw(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
+  Node* adr = basic_plus_adr(top(), base, offset);
   mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered);
   transform_later(mem);
   return mem;
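Renaming these helpers to make_load_raw/make_store_raw and building their address over a top base makes the call sites self-documenting: every use addresses raw memory (object headers during allocation, TLAB words), never a tracked heap oop. For example, initialize_object below now reads (lines taken from this diff; surrounding PhaseMacroExpand state assumed):

    // Store the mark word and, for arrays, the length into the raw,
    // not-yet-published allocation; the address is built over top().
    rawmem = make_store_raw(control, rawmem, object, oopDesc::mark_offset_in_bytes(),
                            mark_node, TypeX_X->basic_type());
    rawmem = make_store_raw(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(),
                            length, T_INT);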
@@ -1753,20 +1753,20 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                     Node* size_in_bytes) {
   InitializeNode* init = alloc->initialization();
   // Store the klass & mark bits
-  Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
+  Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
   if (!mark_node->is_Con()) {
     transform_later(mark_node);
   }
-  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
+  rawmem = make_store_raw(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());

   if (!UseCompactObjectHeaders) {
-    rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
+    rawmem = make_store_raw(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
   }
   int header_size = alloc->minimum_header_size(); // conservatively small

   // Array length
   if (length != nullptr) { // Arrays need length field
-    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
+    rawmem = make_store_raw(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
     // conservatively small header size:
     header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
     if (_igvn.type(klass_node)->isa_aryklassptr()) { // we know the exact header size in most cases:
@@ -1792,6 +1792,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
     if (!(UseTLAB && ZeroTLAB)) {
       rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                             header_size, size_in_bytes,
+                                            true,
                                             &_igvn);
     }
   } else {
@@ -1946,7 +1947,7 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
     uint step_size = AllocatePrefetchStepSize;
     uint distance = AllocatePrefetchDistance;
     for ( intx i = 0; i < lines; i++ ) {
-      prefetch_adr = new AddPNode( old_eden_top, new_eden_top,
+      prefetch_adr = new AddPNode( top(), new_eden_top,
                                    _igvn.MakeConX(distance) );
       transform_later(prefetch_adr);
       prefetch = new PrefetchAllocationNode( i_o, prefetch_adr );
@@ -58,10 +58,10 @@ public:
     _igvn.register_new_node_with_optimizer(n);
     return n;
   }
-  Node* make_load( Node* ctl, Node* mem, Node* base, int offset,
-                   const Type* value_type, BasicType bt);
-  Node* make_store(Node* ctl, Node* mem, Node* base, int offset,
-                   Node* value, BasicType bt);
+  Node* make_load_raw(Node* ctl, Node* mem, Node* base, int offset,
+                      const Type* value_type, BasicType bt);
+  Node* make_store_raw(Node* ctl, Node* mem, Node* base, int offset,
+                       Node* value, BasicType bt);

   Node* make_leaf_call(Node* ctrl, Node* mem,
                        const TypeFunc* call_type, address call_addr,
@@ -144,11 +144,10 @@ private:
                                 Node* slice_idx,
                                 Node* slice_len,
                                 Node* dest_size);
-  bool generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
+  bool generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
                                 const TypePtr* adr_type,
                                 BasicType basic_elem_type,
-                                AllocateNode* alloc,
-                                Node* src, Node* src_offset,
+                                Node* src, Node* src_offset,
                                 Node* dest, Node* dest_offset,
                                 Node* dest_size, bool dest_uninitialized);
   MergeMemNode* generate_slow_arraycopy(ArrayCopyNode *ac,
@@ -384,7 +384,6 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
     transform_later(slow_region);
   }

-  Node* original_dest = dest;
   bool dest_needs_zeroing = false;
   bool acopy_to_uninitialized = false;

@@ -424,7 +423,6 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
     // No zeroing elimination needed here.
     alloc = nullptr;
     acopy_to_uninitialized = false;
-    //original_dest = dest;
     //dest_needs_zeroing = false;
   }

@@ -557,10 +555,9 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
       MergeMemNode* local_mem = MergeMemNode::make(mem);
       transform_later(local_mem);

-      didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
-                                       adr_type, basic_elem_type, alloc,
-                                       src, src_offset, dest, dest_offset,
-                                       dest_size, acopy_to_uninitialized);
+      didit = generate_block_arraycopy(&local_ctrl, &local_mem, adr_type,
+                                       basic_elem_type, src, src_offset,
+                                       dest, dest_offset, dest_size, acopy_to_uninitialized);
       if (didit) {
         // Present the results of the block-copying fast call.
         result_region->init_req(bcopy_path, local_ctrl);
@@ -641,7 +638,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*

     // (At this point we can assume disjoint_bases, since types differ.)
     int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
-    Node* p1 = basic_plus_adr(dest_klass, ek_offset);
+    Node* p1 = basic_plus_adr(top(), dest_klass, ek_offset);
     Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
     Node* dest_elem_klass = transform_later(n1);
     Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
@@ -918,12 +915,12 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
   if (start_con >= 0 && end_con >= 0) {
     // Constant start and end. Simple.
     mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
-                                       start_con, end_con, &_igvn);
+                                       start_con, end_con, false, &_igvn);
   } else if (start_con >= 0 && dest_size != top()) {
     // Constant start, pre-rounded end after the tail of the array.
     Node* end = dest_size;
     mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
-                                       start_con, end, &_igvn);
+                                       start_con, end, false, &_igvn);
   } else if (start_con >= 0 && slice_len != top()) {
     // Constant start, non-constant end. End needs rounding up.
     // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
@@ -936,7 +933,7 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
     end = transform_later(new AddXNode(end, MakeConX(end_base)) );
     end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
     mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
-                                       start_con, end, &_igvn);
+                                       start_con, end, false, &_igvn);
   } else if (start_con < 0 && dest_size != top()) {
     // Non-constant start, pre-rounded end after the tail of the array.
     // This is almost certainly a "round-to-end" operation.
@@ -970,7 +967,7 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
     }
     Node* end = dest_size; // pre-rounded
     mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
-                                       start, end, &_igvn);
+                                       start, end, false, &_igvn);
   } else {
     // Non-constant start, unrounded non-constant end.
     // (Nobody zeroes a random midsection of an array using this routine.)
@@ -981,11 +978,10 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
   merge_mem->set_memory_at(alias_idx, mem);
 }

-bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
+bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                 const TypePtr* adr_type,
                                                 BasicType basic_elem_type,
-                                                AllocateNode* alloc,
-                                                Node* src, Node* src_offset,
+                                                Node* src, Node* src_offset,
                                                 Node* dest, Node* dest_offset,
                                                 Node* dest_size, bool dest_uninitialized) {
   // See if there is an advantage from block transfer.
@@ -1133,7 +1129,7 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode**
   // look in each non-null element's class, at the desired klass's
   // super_check_offset, for the desired klass.
   int sco_offset = in_bytes(Klass::super_check_offset_offset());
-  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
+  Node* p3 = basic_plus_adr(top(), dest_elem_klass, sco_offset);
   Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
   Node* check_offset = ConvI2X(transform_later(n3));
   Node* check_value = dest_elem_klass;
@@ -2563,7 +2563,13 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
         ) {
       int mirror_field = in_bytes(Klass::java_mirror_offset());
       if (tkls->offset() == mirror_field) {
-        return adr2->in(AddPNode::Base);
+#ifdef ASSERT
+        const TypeKlassPtr* tkls2 = phase->type(adr2->in(AddPNode::Address))->is_klassptr();
+        assert(tkls2->offset() == 0, "not a load of java_mirror");
+#endif
+        assert(adr2->in(AddPNode::Base)->is_top(), "not an off heap load");
+        assert(adr2->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(Klass::java_mirror_offset()), "incorrect offset");
+        return adr2->in(AddPNode::Address);
       }
     }
   }
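Because klass-relative addresses are now built over a top base, this identity can no longer return AddPNode::Base (which is top); the klass is the Address input, and the new asserts pin down exactly that shape. A sketch of the node pattern being matched, with hypothetical local names:

    // adr2 = AddP(Base: top, Address: klass, Offset: java_mirror_offset)
    // A load through adr2 reads klass->_java_mirror, so the identity is
    // derived from the Address input, not the (top) Base input.
    Node* base  = adr2->in(AddPNode::Base);     // top() after this change
    Node* klass = adr2->in(AddPNode::Address);  // the klass whose mirror is loaded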
@@ -4112,18 +4118,27 @@ bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseValues* phas
   return true;
 }

+Node* ClearArrayNode::make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase) {
+  Node* base = dest;
+  if (raw_base) {
+    // May be called as part of the initialization of a just allocated object
+    base = phase->C->top();
+  }
+  return phase->transform(new AddPNode(base, dest, offset));
+}
+
 //----------------------------clear_memory-------------------------------------
 // Generate code to initialize object storage to zero.
 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                    intptr_t start_offset,
                                    Node* end_offset,
+                                   bool raw_base,
                                    PhaseGVN* phase) {
   intptr_t offset = start_offset;

   int unit = BytesPerLong;
   if ((offset % unit) != 0) {
-    Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
-    adr = phase->transform(adr);
+    Node* adr = make_address(dest, phase->MakeConX(offset), raw_base, phase);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
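make_address centralizes the base choice for ClearArrayNode: with raw_base set, the caller is zeroing a just-allocated object whose header is not yet formatted, so the AddP gets a top base; otherwise dest is a live array oop and stays the base. A usage sketch under those assumptions:

    // Zeroing freshly allocated storage (object not yet published): raw base.
    Node* adr1 = make_address(dest, phase->MakeConX(offset), /*raw_base=*/true, phase);
    // Clearing a slice of an existing array oop: keep the oop as the base.
    Node* adr2 = make_address(dest, start_offset, /*raw_base=*/false, phase);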
@@ -4132,12 +4147,13 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
   assert((offset % unit) == 0, "");

   // Initialize the remaining stuff, if any, with a ClearArray.
-  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
+  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, raw_base, phase);
 }

 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                    Node* start_offset,
                                    Node* end_offset,
+                                   bool raw_base,
                                    PhaseGVN* phase) {
   if (start_offset == end_offset) {
     // nothing to do
@@ -4157,7 +4173,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

   // Bulk clear double-words
   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
-  Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
+  Node* adr = make_address(dest, start_offset, raw_base, phase);
   mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
   return phase->transform(mem);
 }
@@ -4165,6 +4181,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                    intptr_t start_offset,
                                    intptr_t end_offset,
+                                   bool raw_base,
                                    PhaseGVN* phase) {
   if (start_offset == end_offset) {
     // nothing to do
@@ -4178,11 +4195,10 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
   }
   if (done_offset > start_offset) {
     mem = clear_memory(ctl, mem, dest,
-                       start_offset, phase->MakeConX(done_offset), phase);
+                       start_offset, phase->MakeConX(done_offset), raw_base, phase);
   }
   if (done_offset < end_offset) { // emit the final 32-bit store
-    Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
-    adr = phase->transform(adr);
+    Node* adr = make_address(dest, phase->MakeConX(done_offset), raw_base, phase);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
     mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
@@ -5389,6 +5405,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
         zeroes_done = align_down(zeroes_done, BytesPerInt);
         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                               zeroes_done, zeroes_needed,
+                                              true,
                                               phase);
         zeroes_done = zeroes_needed;
         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
@@ -5447,7 +5464,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
   }
   if (zeroes_done < size_limit) {
     rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
-                                          zeroes_done, size_in_bytes, phase);
+                                          zeroes_done, size_in_bytes, true, phase);
   }
 }

@@ -1072,6 +1072,7 @@ public:
 class ClearArrayNode: public Node {
 private:
   bool _is_large;
+  static Node* make_address(Node* dest, Node* offset, bool raw_base, PhaseGVN* phase);
 public:
   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
     : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
@@ -1099,14 +1100,17 @@ public:
   static Node* clear_memory(Node* control, Node* mem, Node* dest,
                             intptr_t start_offset,
                             intptr_t end_offset,
+                            bool raw_base,
                             PhaseGVN* phase);
   static Node* clear_memory(Node* control, Node* mem, Node* dest,
                             intptr_t start_offset,
                             Node* end_offset,
+                            bool raw_base,
                             PhaseGVN* phase);
   static Node* clear_memory(Node* control, Node* mem, Node* dest,
                             Node* start_offset,
                             Node* end_offset,
+                            bool raw_base,
                             PhaseGVN* phase);
   // Return allocation input memory edge if it is different instance
   // or itself if it is the one we are looking for.
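Each clear_memory overload now threads raw_base down to make_address, and callers pick the flag by context: object initialization (PhaseMacroExpand::initialize_object, InitializeNode::complete_stores) passes true, while generate_clear_array, which zeroes part of an already-formatted array, passes false. A call shape taken from this diff:

    // Zeroing a range of a live array: dest is a real oop, so raw_base is false.
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
                                       start_con, end_con, false, &_igvn);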
@@ -442,7 +442,7 @@ class Parse : public GraphKit {
   SafePointNode* create_entry_map();

   // OSR helpers
-  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
+  Node *fetch_interpreter_state(int index, BasicType bt, Node* local_addrs);
   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
   void load_interpreter_state(Node* osr_buf);

@@ -103,10 +103,9 @@ void Parse::print_statistics() {
 // on stack replacement.
 Node *Parse::fetch_interpreter_state(int index,
                                      BasicType bt,
-                                     Node *local_addrs,
-                                     Node *local_addrs_base) {
+                                     Node* local_addrs) {
   Node *mem = memory(Compile::AliasIdxRaw);
-  Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
+  Node *adr = basic_plus_adr(top(), local_addrs, -index*wordSize);
   Node *ctl = control();

   // Very similar to LoadNode::make, except we handle un-aligned longs and
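The OSR buffer is a raw C-heap buffer handed over by the interpreter, not a heap object, so the old convention of passing osr_buf as both base and address was misleading; with a top base the separate local_addrs_base parameter becomes dead and is dropped. The resulting addressing, in sketch form (surrounding Parse context assumed):

    // Interpreter locals sit at negative word offsets from local_addrs, which
    // points into the raw OSR buffer; there is no oop base, hence top().
    Node* adr = basic_plus_adr(top(), local_addrs, -index*wordSize);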
@@ -121,7 +120,7 @@ Node *Parse::fetch_interpreter_state(int index,
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
     // refers to the back half of the long/double. Recompute adr.
-    adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
+    adr = basic_plus_adr(top(), local_addrs, -(index+1)*wordSize);
     if (Matcher::misaligned_doubles_ok) {
       l = (bt == T_DOUBLE)
         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
@@ -220,7 +219,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
   // Commute monitors from interpreter frame to compiler frame.
   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
   int mcnt = osr_block->flow()->monitor_count();
-  Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
+  Node *monitors_addr = basic_plus_adr(top(), osr_buf, (max_locals+mcnt*2-1)*wordSize);
   for (index = 0; index < mcnt; index++) {
     // Make a BoxLockNode for the monitor.
     BoxLockNode* osr_box = new BoxLockNode(next_monitor());
@@ -241,9 +240,9 @@ void Parse::load_interpreter_state(Node* osr_buf) {
     // Displaced headers and locked objects are interleaved in the
     // temp OSR buffer. We only copy the locked objects out here.
     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
-    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
+    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr);
     // Try and copy the displaced header to the BoxNode
-    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
+    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr);


     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
@@ -271,7 +270,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
   }

   // Extract the needed locals from the interpreter frame.
-  Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
+  Node *locals_addr = basic_plus_adr(top(), osr_buf, (max_locals-1)*wordSize);

   // find all the locals that the interpreter thinks contain live oops
   const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
@@ -318,7 +317,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
       // really for T_OBJECT types so correct it.
       bt = T_OBJECT;
     }
-    Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
+    Node *value = fetch_interpreter_state(index, bt, locals_addr);
     set_local(index, value);
   }

@@ -2128,7 +2127,7 @@ void Parse::call_register_finalizer() {
   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
   Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS));

-  Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::misc_flags_offset()));
+  Node* access_flags_addr = basic_plus_adr(top(), klass, in_bytes(Klass::misc_flags_offset()));
   Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);

   Node* mask = _gvn.transform(new AndINode(access_flags, intcon(KlassFlags::_misc_has_finalizer)));
@@ -220,7 +220,7 @@ void Parse::array_store_check() {

   // Extract the array element class
   int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
-  Node* p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
+  Node* p2 = basic_plus_adr(top(), array_klass, element_klass_offset);
   Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p2, tak));
   assert(array_klass->is_Con() == a_e_klass->is_Con() || StressReflectiveCode, "a constant array type must come with a constant element type");

@@ -182,7 +182,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) {
       return verify_helper(phase, load_klass(phase), cached_t);
     }
     case Compile::SSC_full_test: {
-      Node* p1 = phase->transform(new AddPNode(superklass, superklass, phase->MakeConX(in_bytes(Klass::super_check_offset_offset()))));
+      Node* p1 = phase->transform(new AddPNode(C->top(), superklass, phase->MakeConX(in_bytes(Klass::super_check_offset_offset()))));
       Node* chk_off = phase->transform(new LoadINode(nullptr, C->immutable_memory(), p1, phase->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
       record_for_cleanup(chk_off, phase);

@@ -194,7 +194,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) {
 #ifdef _LP64
       chk_off_X = phase->transform(new ConvI2LNode(chk_off_X));
 #endif
-      Node* p2 = phase->transform(new AddPNode(subklass, subklass, chk_off_X));
+      Node* p2 = phase->transform(new AddPNode(C->top(), subklass, chk_off_X));
       Node* nkls = phase->transform(LoadKlassNode::make(*phase, C->immutable_memory(), p2, phase->type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL));

       return verify_helper(phase, nkls, cached_t);