8301074: Replace NULL with nullptr in share/opto/

Reviewed-by: kvn, jwilhelm
Johan Sjölen 2023-03-09 20:28:26 +00:00
parent a9dba56568
commit 5726d31e56
111 changed files with 5590 additions and 5589 deletions
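Background on what the sweep buys: NULL is an integer constant, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A minimal standalone illustration of the difference (not part of the patch):

#include <cstdio>

void overloaded(int)         { std::puts("overloaded(int)"); }
void overloaded(const char*) { std::puts("overloaded(const char*)"); }

int main() {
  overloaded(0);        // a literal 0 binds to the int overload
  // overloaded(NULL);  // NULL expands to an integer constant (0, 0L, or
                        // __null depending on the toolchain), so this call
                        // is ambiguous or picks the int overload -- not the
                        // pointer call the name suggests
  overloaded(nullptr);  // std::nullptr_t converts only to pointer types,
                        // so the pointer overload is selected
  return 0;
}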


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,7 +137,7 @@ Node *AddNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for commutative operation desired
if (commute(phase, this)) return this;
AddNode *progress = NULL; // Progress flag
AddNode *progress = nullptr; // Progress flag
// Convert "(x+1)+2" into "x+(1+2)". If the right input is a
// constant, and the left input is an add of a constant, flatten the
@ -241,7 +241,7 @@ const Type *AddNode::add_of_identity( const Type *t1, const Type *t2 ) const {
if( t1->higher_equal( zero ) ) return t2;
if( t2->higher_equal( zero ) ) return t1;
return NULL;
return nullptr;
}
AddNode* AddNode::make(Node* in1, Node* in2, BasicType bt) {
@ -253,7 +253,7 @@ AddNode* AddNode::make(Node* in1, Node* in2, BasicType bt) {
default:
fatal("Not implemented for %s", type2name(bt));
}
return NULL;
return nullptr;
}
//=============================================================================
@ -282,7 +282,7 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
// Check for dead cycle: d = (a-b)+(c-d)
assert( in1->in(2) != this && in2->in(2) != this,
"dead loop in AddINode::Ideal" );
Node* sub = SubNode::make(NULL, NULL, bt);
Node* sub = SubNode::make(nullptr, nullptr, bt);
sub->init_req(1, phase->transform(AddNode::make(in1->in(1), in2->in(1), bt)));
sub->init_req(2, phase->transform(AddNode::make(in1->in(2), in2->in(2), bt)));
return sub;
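The hunk above relies on the ring identity (a-b)+(c-d) == (a+c)-(b+d), which holds unconditionally under the JVM's wrap-around integer arithmetic. A standalone check, using uint32_t for the same modulo-2^32 behavior (not HotSpot code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t vals[] = {0u, 1u, 57u, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t a : vals)
    for (uint32_t b : vals)
      for (uint32_t c : vals)
        for (uint32_t d : vals)
          // both sides reassociate the same sum a + (-b) + c + (-d)
          // in the modulo-2^32 ring
          assert((a - b) + (c - d) == (a + c) - (b + d));
  return 0;
}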
@ -313,9 +313,9 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
// Associative
if (op1 == Op_Mul(bt) && op2 == Op_Mul(bt)) {
Node* add_in1 = NULL;
Node* add_in2 = NULL;
Node* mul_in = NULL;
Node* add_in1 = nullptr;
Node* add_in2 = nullptr;
Node* mul_in = nullptr;
if (in1->in(1) == in2->in(1)) {
// Convert "a*b+a*c" into "a*(b+c)"
@ -339,7 +339,7 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
mul_in = in1->in(1);
}
if (mul_in != NULL) {
if (mul_in != nullptr) {
Node* add = phase->transform(AddNode::make(add_in1, add_in2, bt));
return MulNode::make(mul_in, add, bt);
}
@ -348,16 +348,16 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
// Convert (x >>> rshift) + (x << lshift) into RotateRight(x, rshift)
if (Matcher::match_rule_supported(Op_RotateRight) &&
((op1 == Op_URShift(bt) && op2 == Op_LShift(bt)) || (op1 == Op_LShift(bt) && op2 == Op_URShift(bt))) &&
in1->in(1) != NULL && in1->in(1) == in2->in(1)) {
in1->in(1) != nullptr && in1->in(1) == in2->in(1)) {
Node* rshift = op1 == Op_URShift(bt) ? in1->in(2) : in2->in(2);
Node* lshift = op1 == Op_URShift(bt) ? in2->in(2) : in1->in(2);
if (rshift != NULL && lshift != NULL) {
if (rshift != nullptr && lshift != nullptr) {
const TypeInt* rshift_t = phase->type(rshift)->isa_int();
const TypeInt* lshift_t = phase->type(lshift)->isa_int();
int bits = bt == T_INT ? 32 : 64;
int mask = bt == T_INT ? 0x1F : 0x3F;
if (lshift_t != NULL && lshift_t->is_con() &&
rshift_t != NULL && rshift_t->is_con() &&
if (lshift_t != nullptr && lshift_t->is_con() &&
rshift_t != nullptr && rshift_t->is_con() &&
((lshift_t->get_con() & mask) == (bits - (rshift_t->get_con() & mask)))) {
return new RotateRightNode(in1->in(1), phase->intcon(rshift_t->get_con() & mask), TypeInteger::bottom(bt));
}
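Why the transformation in the hunk above is sound: when the shift counts satisfy lshift == bits - rshift, the two shifted values occupy disjoint bit ranges, so the add never carries and equals an OR, i.e. a rotate. A minimal check for the 32-bit case (plain uint32_t arithmetic, not HotSpot code):

#include <cassert>
#include <cstdint>

// What RotateRightNode computes for 32-bit values.
static uint32_t rotr(uint32_t x, unsigned r) {
  r &= 31;
  return (x >> r) | (x << ((32 - r) & 31));
}

int main() {
  uint32_t xs[] = {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu};
  for (uint32_t x : xs) {
    for (unsigned r = 1; r < 32; ++r) {
      unsigned l = 32 - r;  // the matching left-shift count
      // (x >>> r) keeps bits [0, 32-r) and (x << l) keeps bits [32-r, 32);
      // disjoint bits mean no carries, so + behaves like | here
      assert(((x >> r) + (x << l)) == rotr(x, r));
    }
  }
  return 0;
}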
@ -505,7 +505,7 @@ const Type *AddFNode::add_of_identity( const Type *t1, const Type *t2 ) const {
// if( t1->higher_equal( zero ) ) return t2;
// if( t2->higher_equal( zero ) ) return t1;
return NULL;
return nullptr;
}
//------------------------------add_ring---------------------------------------
@ -520,7 +520,7 @@ const Type *AddFNode::add_ring( const Type *t0, const Type *t1 ) const {
//------------------------------Ideal------------------------------------------
Node *AddFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Floating point additions are not associative because of boundary conditions (infinity)
return commute(phase, this) ? this : NULL;
return commute(phase, this) ? this : nullptr;
}
@ -537,7 +537,7 @@ const Type *AddDNode::add_of_identity( const Type *t1, const Type *t2 ) const {
// if( t1->higher_equal( zero ) ) return t2;
// if( t2->higher_equal( zero ) ) return t1;
return NULL;
return nullptr;
}
//------------------------------add_ring---------------------------------------
// Supplied function returns the sum of the inputs.
@ -551,7 +551,7 @@ const Type *AddDNode::add_ring( const Type *t0, const Type *t1 ) const {
//------------------------------Ideal------------------------------------------
Node *AddDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Floating point additions are not associative because of boundary conditions (infinity)
return commute(phase, this) ? this : NULL;
return commute(phase, this) ? this : nullptr;
}
@ -565,7 +565,7 @@ Node* AddPNode::Identity(PhaseGVN* phase) {
//------------------------------Idealize---------------------------------------
Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Bail out if dead inputs
if( phase->type( in(Address) ) == Type::TOP ) return NULL;
if( phase->type( in(Address) ) == Type::TOP ) return nullptr;
// If the left input is an add of a constant, flatten the expression tree.
const Node *n = in(Address);
@ -576,12 +576,12 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
"dead loop in AddPNode::Ideal" );
// Type of left input's right input
const Type *t = phase->type( addp->in(Offset) );
if( t == Type::TOP ) return NULL;
if( t == Type::TOP ) return nullptr;
const TypeX *t12 = t->is_intptr_t();
if( t12->is_con() ) { // Left input is an add of a constant?
// If the right input is a constant, combine constants
const Type *temp_t2 = phase->type( in(Offset) );
if( temp_t2 == Type::TOP ) return NULL;
if( temp_t2 == Type::TOP ) return nullptr;
const TypeX *t2 = temp_t2->is_intptr_t();
Node* address;
Node* offset;
@ -602,7 +602,7 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Raw pointers?
if( in(Base)->bottom_type() == Type::TOP ) {
// If this is a NULL+long form (from unsafe accesses), switch to a rawptr.
// If this is a null+long form (from unsafe accesses), switch to a rawptr.
if (phase->type(in(Address)) == TypePtr::NULL_PTR) {
Node* offset = in(Offset);
return new CastX2PNode(offset);
@ -623,13 +623,13 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
}
return NULL; // No progress
return nullptr; // No progress
}
//------------------------------bottom_type------------------------------------
// Bottom-type is the pointer-type with unknown offset.
const Type *AddPNode::bottom_type() const {
if (in(Address) == NULL) return TypePtr::BOTTOM;
if (in(Address) == nullptr) return TypePtr::BOTTOM;
const TypePtr *tp = in(Address)->bottom_type()->isa_ptr();
if( !tp ) return Type::TOP; // TOP input means TOP output
assert( in(Offset)->Opcode() != Op_ConP, "" );
@ -667,7 +667,7 @@ const Type* AddPNode::Value(PhaseGVN* phase) const {
//------------------------Ideal_base_and_offset--------------------------------
// Split an oop pointer into a base and offset.
// (The offset might be Type::OffsetBot in the case of an array.)
// Return the base, or NULL if failure.
// Return the base, or null if failure.
Node* AddPNode::Ideal_base_and_offset(Node* ptr, PhaseTransform* phase,
// second return value:
intptr_t& offset) {
@ -683,7 +683,7 @@ Node* AddPNode::Ideal_base_and_offset(Node* ptr, PhaseTransform* phase,
}
}
offset = Type::OffsetBot;
return NULL;
return nullptr;
}
//------------------------------unpack_offsets----------------------------------
@ -733,20 +733,20 @@ Node* rotate_shift(PhaseGVN* phase, Node* lshift, Node* rshift, int mask) {
// val << norm_con_shift | val >> ({32|64} - norm_con_shift) => rotate_left val, norm_con_shift
const TypeInt* lshift_t = phase->type(lshift)->isa_int();
const TypeInt* rshift_t = phase->type(rshift)->isa_int();
if (lshift_t != NULL && lshift_t->is_con() &&
rshift_t != NULL && rshift_t->is_con() &&
if (lshift_t != nullptr && lshift_t->is_con() &&
rshift_t != nullptr && rshift_t->is_con() &&
((lshift_t->get_con() & mask) == ((mask + 1) - (rshift_t->get_con() & mask)))) {
return phase->intcon(lshift_t->get_con() & mask);
}
// val << var_shift | val >> ({0|32|64} - var_shift) => rotate_left val, var_shift
if (rshift->Opcode() == Op_SubI && rshift->in(2) == lshift && rshift->in(1)->is_Con()){
const TypeInt* shift_t = phase->type(rshift->in(1))->isa_int();
if (shift_t != NULL && shift_t->is_con() &&
if (shift_t != nullptr && shift_t->is_con() &&
(shift_t->get_con() == 0 || shift_t->get_con() == (mask + 1))) {
return lshift;
}
}
return NULL;
return nullptr;
}
Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) {
@ -757,21 +757,21 @@ Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* lshift = in(1)->in(2);
Node* rshift = in(2)->in(2);
Node* shift = rotate_shift(phase, lshift, rshift, 0x1F);
if (shift != NULL) {
if (shift != nullptr) {
return new RotateLeftNode(in(1)->in(1), shift, TypeInt::INT);
}
return NULL;
return nullptr;
}
if (Matcher::match_rule_supported(Op_RotateRight) &&
lopcode == Op_URShiftI && ropcode == Op_LShiftI && in(1)->in(1) == in(2)->in(1)) {
Node* rshift = in(1)->in(2);
Node* lshift = in(2)->in(2);
Node* shift = rotate_shift(phase, rshift, lshift, 0x1F);
if (shift != NULL) {
if (shift != nullptr) {
return new RotateRightNode(in(1)->in(1), shift, TypeInt::INT);
}
}
return NULL;
return nullptr;
}
//------------------------------add_ring---------------------------------------
@ -823,21 +823,21 @@ Node* OrLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* lshift = in(1)->in(2);
Node* rshift = in(2)->in(2);
Node* shift = rotate_shift(phase, lshift, rshift, 0x3F);
if (shift != NULL) {
if (shift != nullptr) {
return new RotateLeftNode(in(1)->in(1), shift, TypeLong::LONG);
}
return NULL;
return nullptr;
}
if (Matcher::match_rule_supported(Op_RotateRight) &&
lopcode == Op_URShiftL && ropcode == Op_LShiftL && in(1)->in(1) == in(2)->in(1)) {
Node* rshift = in(1)->in(2);
Node* lshift = in(2)->in(2);
Node* shift = rotate_shift(phase, rshift, lshift, 0x3F);
if (shift != NULL) {
if (shift != nullptr) {
return new RotateRightNode(in(1)->in(1), shift, TypeLong::LONG);
}
}
return NULL;
return nullptr;
}
//------------------------------add_ring---------------------------------------
@ -1005,16 +1005,16 @@ const Type* XorLNode::Value(PhaseGVN* phase) const {
Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) {
bool is_int = gvn.type(a)->isa_int();
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
assert(is_int == (gvn.type(b)->isa_int() != NULL), "inconsistent inputs");
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
BasicType bt = is_int ? T_INT: T_LONG;
Node* hook = NULL;
Node* hook = nullptr;
if (gvn.is_IterGVN()) {
// Make sure a and b are not destroyed
hook = new Node(2);
hook->init_req(0, a);
hook->init_req(1, b);
}
Node* res = NULL;
Node* res = nullptr;
if (is_int && !is_unsigned) {
if (is_max) {
res = gvn.transform(new MaxINode(a, b));
@ -1024,16 +1024,16 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co
assert(gvn.type(res)->is_int()->_lo >= t->is_int()->_lo && gvn.type(res)->is_int()->_hi <= t->is_int()->_hi, "type doesn't match");
}
} else {
Node* cmp = NULL;
Node* cmp = nullptr;
if (is_max) {
cmp = gvn.transform(CmpNode::make(a, b, bt, is_unsigned));
} else {
cmp = gvn.transform(CmpNode::make(b, a, bt, is_unsigned));
}
Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
res = gvn.transform(CMoveNode::make(NULL, bol, a, b, t));
res = gvn.transform(CMoveNode::make(nullptr, bol, a, b, t));
}
if (hook != NULL) {
if (hook != nullptr) {
hook->destruct(&gvn);
}
return res;
@ -1042,17 +1042,17 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co
Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) {
bool is_int = gvn.type(a)->isa_int();
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
assert(is_int == (gvn.type(b)->isa_int() != NULL), "inconsistent inputs");
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
BasicType bt = is_int ? T_INT: T_LONG;
Node* zero = gvn.integercon(0, bt);
Node* hook = NULL;
Node* hook = nullptr;
if (gvn.is_IterGVN()) {
// Make sure a and b are not destroyed
hook = new Node(2);
hook->init_req(0, a);
hook->init_req(1, b);
}
Node* cmp = NULL;
Node* cmp = nullptr;
if (is_max) {
cmp = gvn.transform(CmpNode::make(a, b, bt, false));
} else {
@ -1060,8 +1060,8 @@ Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const
}
Node* sub = gvn.transform(SubNode::make(a, b, bt));
Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
Node* res = gvn.transform(CMoveNode::make(NULL, bol, sub, zero, t));
if (hook != NULL) {
Node* res = gvn.transform(CMoveNode::make(nullptr, bol, sub, zero, t));
if (hook != nullptr) {
hook->destruct(&gvn);
}
return res;
@ -1108,7 +1108,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (x->Opcode() == Op_AddI && // Check for "x+c0" and collect constant
x->in(2)->is_Con()) {
const Type* t = x->in(2)->bottom_type();
if (t == Type::TOP) return NULL; // No progress
if (t == Type::TOP) return nullptr; // No progress
x_off = t->is_int()->get_con();
x = x->in(1);
}
@ -1120,7 +1120,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (y->Opcode() == Op_AddI && // Check for "y+c1" and collect constant
y->in(2)->is_Con()) {
const Type* t = y->in(2)->bottom_type();
if (t == Type::TOP) return NULL; // No progress
if (t == Type::TOP) return nullptr; // No progress
y_off = t->is_int()->get_con();
y = y->in(1);
}
@ -1138,7 +1138,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (y->Opcode() == Op_AddI &&// Check for "y+c1" and collect constant
y->in(2)->is_Con()) {
const Type* t = y->in(2)->bottom_type();
if (t == Type::TOP) return NULL; // No progress
if (t == Type::TOP) return nullptr; // No progress
y_off = t->is_int()->get_con();
y = y->in(1);
}
@ -1148,7 +1148,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Transform MAX2(x + c0, MAX2(x + c1, z)) into MAX2(x + MAX2(c0, c1), z)
// if x == y and the additions can't overflow.
if (x == y && tx != NULL &&
if (x == y && tx != nullptr &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new MaxINode(phase->transform(new AddINode(x, phase->intcon(MAX2(x_off, y_off)))), r->in(2));
@ -1156,13 +1156,13 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
} else {
// Transform MAX2(x + c0, y + c1) into x + MAX2(c0, c1)
// if x == y and the additions can't overflow.
if (x == y && tx != NULL &&
if (x == y && tx != nullptr &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new AddINode(x, phase->intcon(MAX2(x_off, y_off)));
}
}
return NULL;
return nullptr;
}
//=============================================================================
@ -1170,7 +1170,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// MINs show up in range-check loop limit calculations. Look for
// "MIN2(x+c0,MIN2(y,x+c1))". Pick the smaller constant: "MIN2(x+c0,y)"
Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node *progress = NULL;
Node *progress = nullptr;
// Force a right-spline graph
Node *l = in(1);
Node *r = in(2);
@ -1191,7 +1191,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( x->Opcode() == Op_AddI && // Check for "x+c0" and collect constant
x->in(2)->is_Con() ) {
const Type *t = x->in(2)->bottom_type();
if( t == Type::TOP ) return NULL; // No progress
if( t == Type::TOP ) return nullptr; // No progress
x_off = t->is_int()->get_con();
x = x->in(1);
}
@ -1203,7 +1203,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( y->Opcode() == Op_AddI && // Check for "y+c1" and collect constant
y->in(2)->is_Con() ) {
const Type *t = y->in(2)->bottom_type();
if( t == Type::TOP ) return NULL; // No progress
if( t == Type::TOP ) return nullptr; // No progress
y_off = t->is_int()->get_con();
y = y->in(1);
}
@ -1221,7 +1221,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( y->Opcode() == Op_AddI &&// Check for "y+c1" and collect constant
y->in(2)->is_Con() ) {
const Type *t = y->in(2)->bottom_type();
if( t == Type::TOP ) return NULL; // No progress
if( t == Type::TOP ) return nullptr; // No progress
y_off = t->is_int()->get_con();
y = y->in(1);
}
@ -1231,7 +1231,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Transform MIN2(x + c0, MIN2(x + c1, z)) into MIN2(x + MIN2(c0, c1), z)
// if x == y and the additions can't overflow.
if (x == y && tx != NULL &&
if (x == y && tx != nullptr &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new MinINode(phase->transform(new AddINode(x, phase->intcon(MIN2(x_off, y_off)))), r->in(2));
@ -1239,13 +1239,13 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) {
} else {
// Transform MIN2(x + c0, y + c1) into x + MIN2(c0, c1)
// if x == y and the additions can't overflow.
if (x == y && tx != NULL &&
if (x == y && tx != nullptr &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new AddINode(x,phase->intcon(MIN2(x_off,y_off)));
}
}
return NULL;
return nullptr;
}
//------------------------------add_ring---------------------------------------
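The can_overflow() guards in the MIN2/MAX2 transforms above are load-bearing: with wrap-around arithmetic, MIN2(x+c0, x+c1) and x+MIN2(c0, c1) can disagree. A small demonstration (unsigned arithmetic stands in for the JVM's well-defined wrap-around; the conversions back to int assume two's complement, as on mainstream platforms):

#include <algorithm>
#include <climits>
#include <cstdio>

int main() {
  int x = INT_MAX, c0 = 1, c1 = -1;
  // x + c0 wraps to INT_MIN, so the left side picks the wrapped value...
  int lhs = std::min((int)((unsigned)x + c0), (int)((unsigned)x + c1));
  // ...while folding the constants first never wraps at all
  int rhs = (int)((unsigned)x + (unsigned)std::min(c0, c1));
  std::printf("MIN2(x+c0, x+c1) = %d, x + MIN2(c0, c1) = %d\n", lhs, rhs);
  return 0;
}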


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
#include "utilities/powerOfTwo.hpp"
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
: CallNode(arraycopy_type(), NULL, TypePtr::BOTTOM),
: CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
_kind(None),
_alloc_tightly_coupled(alloc_tightly_coupled),
_has_negative_length_guard(has_negative_length_guard),
@ -131,7 +131,7 @@ int ArrayCopyNode::get_count(PhaseGVN *phase) const {
return nb_fields;
} else {
const TypeAryPtr* ary_src = src_type->isa_aryptr();
assert (ary_src != NULL, "not an array or instance?");
assert (ary_src != nullptr, "not an array or instance?");
// clone passes a length as a rounded number of longs. If we're
// cloning an array we'll do it element by element. If the
// length input to ArrayCopyNode is constant, length of input
@ -174,7 +174,7 @@ void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMe
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
if (!is_clonebasic()) {
return NULL;
return nullptr;
}
Node* base_src = in(ArrayCopyNode::Src);
@ -184,8 +184,8 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c
const Type* src_type = phase->type(base_src);
const TypeInstPtr* inst_src = src_type->isa_instptr();
if (inst_src == NULL) {
return NULL;
if (inst_src == nullptr) {
return nullptr;
}
MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
@ -264,8 +264,8 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
// newly allocated object is guaranteed to not overlap with source object
disjoint_bases = is_alloc_tightly_coupled();
if (ary_src == NULL || ary_src->elem() == Type::BOTTOM ||
ary_dest == NULL || ary_dest->elem() == Type::BOTTOM) {
if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
// We don't know if arguments are arrays
return false;
}
@ -324,7 +324,7 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
copy_type = dest_elem;
} else {
assert(ary_src != NULL, "should be a clone");
assert(ary_src != nullptr, "should be a clone");
assert(is_clonebasic(), "should be");
disjoint_bases = true;
@ -372,7 +372,7 @@ void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, b
if (!disjoint_bases && count > 1) {
Node* src_offset = in(ArrayCopyNode::SrcPos);
Node* dest_offset = in(ArrayCopyNode::DestPos);
assert(src_offset != NULL && dest_offset != NULL, "should be");
assert(src_offset != nullptr && dest_offset != nullptr, "should be");
Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
@ -489,13 +489,13 @@ bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
CallProjections callprojs;
extract_projections(&callprojs, true, false);
if (callprojs.fallthrough_ioproj != NULL) {
if (callprojs.fallthrough_ioproj != nullptr) {
igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
}
if (callprojs.fallthrough_memproj != NULL) {
if (callprojs.fallthrough_memproj != nullptr) {
igvn->replace_node(callprojs.fallthrough_memproj, mem);
}
if (callprojs.fallthrough_catchproj != NULL) {
if (callprojs.fallthrough_catchproj != nullptr) {
igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
}
@ -525,7 +525,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (StressArrayCopyMacroNode && !can_reshape) {
phase->record_for_igvn(this);
return NULL;
return nullptr;
}
// See if it's a small array copy and we can inline it as
@ -537,51 +537,51 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (!is_clonebasic() && !is_arraycopy_validated() &&
!is_copyofrange_validated() && !is_copyof_validated()) {
return NULL;
return nullptr;
}
assert(in(TypeFunc::Control) != NULL &&
in(TypeFunc::Memory) != NULL &&
in(ArrayCopyNode::Src) != NULL &&
in(ArrayCopyNode::Dest) != NULL &&
in(ArrayCopyNode::Length) != NULL &&
in(ArrayCopyNode::SrcPos) != NULL &&
in(ArrayCopyNode::DestPos) != NULL, "broken inputs");
assert(in(TypeFunc::Control) != nullptr &&
in(TypeFunc::Memory) != nullptr &&
in(ArrayCopyNode::Src) != nullptr &&
in(ArrayCopyNode::Dest) != nullptr &&
in(ArrayCopyNode::Length) != nullptr &&
in(ArrayCopyNode::SrcPos) != nullptr &&
in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");
if (in(TypeFunc::Control)->is_top() ||
in(TypeFunc::Memory)->is_top() ||
phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
(in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
(in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
return NULL;
(in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
(in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
return nullptr;
}
int count = get_count(phase);
if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
return NULL;
return nullptr;
}
Node* mem = try_clone_instance(phase, can_reshape, count);
if (mem != NULL) {
return (mem == NodeSentinel) ? NULL : mem;
if (mem != nullptr) {
return (mem == NodeSentinel) ? nullptr : mem;
}
Node* adr_src = NULL;
Node* base_src = NULL;
Node* adr_dest = NULL;
Node* base_dest = NULL;
Node* adr_src = nullptr;
Node* base_src = nullptr;
Node* adr_dest = nullptr;
Node* base_dest = nullptr;
BasicType copy_type = T_ILLEGAL;
const Type* value_type = NULL;
const Type* value_type = nullptr;
bool disjoint_bases = false;
if (!prepare_array_copy(phase, can_reshape,
adr_src, base_src, adr_dest, base_dest,
copy_type, value_type, disjoint_bases)) {
assert(adr_src == NULL, "no node can be left behind");
assert(adr_dest == NULL, "no node can be left behind");
return NULL;
assert(adr_src == nullptr, "no node can be left behind");
assert(adr_dest == nullptr, "no node can be left behind");
return nullptr;
}
Node* src = in(ArrayCopyNode::Src);
@ -611,7 +611,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
adr_src, base_src, adr_dest, base_dest,
copy_type, value_type, count);
Node* ctl = NULL;
Node* ctl = nullptr;
if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
ctl = new RegionNode(3);
ctl->init_req(1, forward_ctl);
@ -648,7 +648,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// put in worklist, so that if it happens to be dead it is removed
phase->is_IterGVN()->_worklist.push(mem);
}
return NULL;
return nullptr;
}
return mem;
@ -673,7 +673,7 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
}
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
if (n != NULL &&
if (n != nullptr &&
n->is_Call() &&
n->as_Call()->may_modify(t_oop, phase) &&
(n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
@ -691,11 +691,11 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTra
// step over g1 gc barrier if we're at e.g. a clone with ReduceInitialCardMarks off
c = bs->step_over_gc_barrier(c);
CallNode* call = NULL;
guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to.");
CallNode* call = nullptr;
guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to.");
if (c->is_Region()) {
for (uint i = 1; i < c->req(); i++) {
if (c->in(i) != NULL) {
if (c->in(i) != nullptr) {
Node* n = c->in(i)->in(0);
if (may_modify_helper(t_oop, n, phase, call)) {
ac = call->isa_ArrayCopy();
@ -709,7 +709,7 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTra
#ifdef ASSERT
bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
assert(c == mb->in(0) || (ac != nullptr && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
return true;
} else if (mb->trailing_partial_array_copy()) {
@ -736,7 +736,7 @@ bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransf
const TypeInt *len_t = phase->type(len)->isa_int();
const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();
if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
return !must_modify;
}
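The `return !must_modify` at the end of the hunk above is the usual conservative-answer idiom: one function answers both "may this copy modify the range?" and "must it?". A hypothetical standalone sketch of the pattern (names invented for illustration):

// One query function, two questions, one conservative fallback.
bool copy_modifies_range(bool facts_known, bool must_modify) {
  if (!facts_known) {
    // Without type information, answer in the safe direction:
    //   "may it modify?"  (must_modify == false) -> true
    //   "must it modify?" (must_modify == true)  -> false
    return !must_modify;
  }
  // ... precise interval reasoning would go here ...
  return false;
}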


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -142,8 +142,8 @@ public:
Node* length,
bool alloc_tightly_coupled,
bool has_negative_length_guard,
Node* src_klass = NULL, Node* dest_klass = NULL,
Node* src_length = NULL, Node* dest_length = NULL);
Node* src_klass = nullptr, Node* dest_klass = nullptr,
Node* src_length = nullptr, Node* dest_length = nullptr);
void connect_outputs(GraphKit* kit, bool deoptimize_on_exception = false);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,7 @@ void Block_Array::grow( uint i ) {
if( !_size ) {
_size = 1;
_blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
_blocks[0] = NULL;
_blocks[0] = nullptr;
}
uint old = _size;
_size = next_power_of_2(i);
@ -313,7 +313,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
st->print("in( ");
for (uint i=1; i<num_preds(); i++) {
Node *s = pred(i);
if (cfg != NULL) {
if (cfg != nullptr) {
Block *p = cfg->get_block_for_node(s);
p->dump_pred(cfg, p, st);
} else {
@ -332,7 +332,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
const Block *bhead = this; // Head of self-loop
Node *bh = bhead->head();
if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
if ((cfg != nullptr) && bh->is_Loop() && !head()->is_Root()) {
LoopNode *loop = bh->as_Loop();
const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
while (bx->is_connector()) {
@ -359,7 +359,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
}
void Block::dump() const {
dump(NULL);
dump(nullptr);
}
void Block::dump(const PhaseCFG* cfg) const {
@ -375,11 +375,11 @@ PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _root(root)
, _block_arena(arena)
, _regalloc(NULL)
, _regalloc(nullptr)
, _scheduling_for_pressure(false)
, _matcher(matcher)
, _node_to_block_mapping(arena)
, _node_latency(NULL)
, _node_latency(nullptr)
#ifndef PRODUCT
, _trace_opto_pipelining(C->directive()->TraceOptoPipeliningOption)
#endif
@ -391,10 +391,10 @@ PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
// I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
// then Match it into a machine-specific Node. Then clone the machine
// Node on demand.
Node *x = new GotoNode(NULL);
Node *x = new GotoNode(nullptr);
x->init_req(0, x);
_goto = matcher.match_tree(x);
assert(_goto != NULL, "");
assert(_goto != nullptr, "");
_goto->set_req(0,_goto);
// Build the CFG in Reverse Post Order
@ -427,7 +427,7 @@ uint PhaseCFG::build_cfg() {
const Node *x = proj->is_block_proj();
// Does the block end with a proper block-ending Node? One of Return,
// If or Goto? (This check should be done for visited nodes also).
if (x == NULL) { // Does not end right...
if (x == nullptr) { // Does not end right...
Node *g = _goto->clone(); // Force it to end in a Goto
g->set_req(0, proj);
np->set_req(idx, g);
@ -661,7 +661,7 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
if (bx == NULL) return false;
if (bx == nullptr) return false;
// Return false if bx is already scheduled.
uint bx_index = bx->_pre_order;
@ -848,7 +848,7 @@ void PhaseCFG::fixup_flow() {
}
assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");
Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : nullptr;
Block* bs0 = block->non_connector_successor(0);
// Check for multi-way branches where I cannot negate the test to
@ -1204,7 +1204,7 @@ void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
uint index = b->find_node(n);
// Insert new nodes into block and map them in nodes->blocks array
// and remember last node in n2.
Node *n2 = NULL;
Node *n2 = nullptr;
for (int k = 0; k < new_nodes.length(); ++k) {
n2 = new_nodes.at(k);
b->insert_node(n2, ++index);
@ -1233,7 +1233,7 @@ void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
}
}
// If anything has been inserted (n2 != NULL), continue after last node inserted.
// If anything has been inserted (n2 != nullptr), continue after last node inserted.
// This does not always work. Some postalloc expands don't insert any nodes, if they
// do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
j = n2 ? b->find_node(n2) : j;
@ -1292,7 +1292,7 @@ void PhaseCFG::dump( ) const {
void PhaseCFG::dump_headers() {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
if (block != NULL) {
if (block != nullptr) {
block->dump_head(this);
}
}
@ -1312,7 +1312,7 @@ void PhaseCFG::verify_memory_writer_placement(const Block* b, const Node* n) con
break;
}
home_or_ancestor = home_or_ancestor->parent();
} while (home_or_ancestor != NULL);
} while (home_or_ancestor != nullptr);
assert(found, "block b is not in n's home loop or an ancestor of it");
}
@ -1365,7 +1365,7 @@ void PhaseCFG::verify() const {
// when CreateEx node is moved in build_ifg_physical().
if (def_block == block && !(block->head()->is_Loop() && n->is_Phi()) &&
// See (+++) comment in reg_split.cpp
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
!(n->jvms() != nullptr && n->jvms()->is_monitor_use(k))) {
bool is_loop = false;
if (n->is_Phi()) {
for (uint l = 1; l < def->req(); l++) {
@ -1389,7 +1389,7 @@ void PhaseCFG::verify() const {
assert(j >= 1, "a projection cannot be the first instruction in a block");
Node* pred = block->get_node(j - 1);
Node* parent = n->in(0);
assert(parent != NULL, "projections must have a parent");
assert(parent != nullptr, "projections must have a parent");
assert(pred == parent || (pred->is_Proj() && pred->in(0) == parent),
"projections must follow their parents or other sibling projections");
}
@ -1483,7 +1483,7 @@ void UnionFind::Union( uint idx1, uint idx2 ) {
#ifndef PRODUCT
void Trace::dump( ) const {
tty->print_cr("Trace (freq %f)", first_block()->_freq);
for (Block *b = first_block(); b != NULL; b = next(b)) {
for (Block *b = first_block(); b != nullptr; b = next(b)) {
tty->print(" B%d", b->_pre_order);
if (b->head()->is_Loop()) {
tty->print(" (L%d)", b->compute_loop_alignment());
@ -1561,7 +1561,7 @@ extern "C" int trace_frequency_order(const void *p0, const void *p1) {
void PhaseBlockLayout::find_edges() {
// Walk the blocks, creating edges and Traces
uint i;
Trace *tr = NULL;
Trace *tr = nullptr;
for (i = 0; i < _cfg.number_of_blocks(); i++) {
Block* b = _cfg.get_block(i);
tr = new Trace(b, next, prev);
@ -1590,7 +1590,7 @@ void PhaseBlockLayout::find_edges() {
assert(n == _cfg.get_block(i), "expecting next block");
tr->append(n);
uf->map(n->_pre_order, tr->id());
traces[n->_pre_order] = NULL;
traces[n->_pre_order] = nullptr;
nfallthru = b->num_fall_throughs();
b = n;
}
@ -1616,7 +1616,7 @@ void PhaseBlockLayout::find_edges() {
assert(b->is_connector(), "connector blocks at the end");
tr->append(b);
uf->map(b->_pre_order, tr->id());
traces[b->_pre_order] = NULL;
traces[b->_pre_order] = nullptr;
}
}
@ -1642,7 +1642,7 @@ void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
// Union the lower with the higher and remove the pointer
// to the higher.
uf->Union(lo_id, hi_id);
traces[hi_id] = NULL;
traces[hi_id] = nullptr;
}
// Append traces together via the most frequently executed edges
@ -1772,7 +1772,7 @@ void PhaseBlockLayout::reorder_traces(int count) {
// Compact the traces.
for (int i = 0; i < count; i++) {
Trace *tr = traces[i];
if (tr != NULL) {
if (tr != nullptr) {
new_traces[new_count++] = tr;
}
}
@ -1788,9 +1788,9 @@ void PhaseBlockLayout::reorder_traces(int count) {
_cfg.clear_blocks();
for (int i = 0; i < new_count; i++) {
Trace *tr = new_traces[i];
if (tr != NULL) {
if (tr != nullptr) {
// push blocks onto the CFG list
for (Block* b = tr->first_block(); b != NULL; b = tr->next(b)) {
for (Block* b = tr->first_block(); b != nullptr; b = tr->next(b)) {
_cfg.add_block(b);
}
}
@ -1855,13 +1855,13 @@ bool Trace::backedge(CFGEdge *e) {
// Find the last block in the trace that has a conditional
// branch.
Block *b;
for (b = last_block(); b != NULL; b = prev(b)) {
for (b = last_block(); b != nullptr; b = prev(b)) {
if (b->num_fall_throughs() == 2) {
break;
}
}
if (b != last_block() && b != NULL) {
if (b != last_block() && b != nullptr) {
loop_rotated = true;
// Rotate the loop by doing two-part linked-list surgery.
@ -1873,7 +1873,7 @@ bool Trace::backedge(CFGEdge *e) {
// Backbranch to the top of a trace
// Scroll forward through the trace from the targ_block. If we find
// a loop head before another loop top, use the loop head alignment.
for (Block *b = targ_block; b != NULL; b = next(b)) {
for (Block *b = targ_block; b != nullptr; b = next(b)) {
if (b->has_loop_alignment()) {
break;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ struct Tarjan;
//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Abstractly provides an infinite array of Block*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ArenaObj {
@ -60,11 +60,11 @@ public:
debug_only(_limit=0);
_blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
for( int i = 0; i < OptoBlockListSize; i++ ) {
_blocks[i] = NULL;
_blocks[i] = nullptr;
}
}
Block *lookup( uint i ) const // Lookup, or NULL for not mapped
{ return (i<Max()) ? _blocks[i] : (Block*)NULL; }
Block *lookup( uint i ) const // Lookup, or null for not mapped
{ return (i<Max()) ? _blocks[i] : (Block*)nullptr; }
Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
{ assert( i < Max(), "oob" ); return _blocks[i]; }
// Extend the mapping: index i maps to Block *n.
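A free-standing sketch of the doubling-array trick described above: reads at any index are valid and default to nullptr, and writes grow the backing store to the next power of two. (Assumptions: the C heap instead of a HotSpot Arena, and calloc's all-bits-zero as a null pointer, which holds on mainstream platforms.)

#include <cstdlib>
#include <cstring>

template <typename T>
class GrowableMap {
  T**    _slots = nullptr;
  size_t _size  = 0;

  void grow(size_t i) {
    size_t new_size = _size ? _size : 1;
    while (new_size <= i) new_size *= 2;             // classic doubling
    T** bigger = (T**)calloc(new_size, sizeof(T*));  // zeroed -> all nullptr
    if (_size) memcpy(bigger, _slots, _size * sizeof(T*));
    free(_slots);
    _slots = bigger;
    _size  = new_size;
  }

 public:
  T* lookup(size_t i) const {                        // null for not mapped
    return i < _size ? _slots[i] : nullptr;
  }
  void map(size_t i, T* v) {
    if (i >= _size) grow(i);
    _slots[i] = v;
  }
  ~GrowableMap() { free(_slots); }
};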
@ -114,7 +114,7 @@ private:
public:
// Get the node at index 'at_index', if 'at_index' is out of bounds return NULL
// Get the node at index 'at_index', if 'at_index' is out of bounds return null
Node* get_node(uint at_index) const {
return _nodes[at_index];
}
@ -282,7 +282,7 @@ public:
_num_succs(0),
_pre_order(0),
_idom(0),
_loop(NULL),
_loop(nullptr),
_reg_pressure(0),
_ihrp_index(1),
_freg_pressure(0),
@ -466,8 +466,8 @@ class PhaseCFG : public Phase {
Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// Detect implicit-null-check opportunities. Basically, find null checks
// with suitable memory ops nearby. Use the memory op to do the null check.
// I can generate a memory op if there is not one nearby.
void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
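The idea behind implicit_null_check(), sketched outside HotSpot: let the memory access itself perform the test. The explicit branch disappears because dereferencing null faults, and the VM's signal handler maps the faulting PC back to the exception. (Illustrative stand-ins only; throwing an int stands in for raising NullPointerException.)

struct Obj { int field; };

// What the bytecode semantically requires:
int load_with_explicit_check(Obj* p) {
  if (p == nullptr) throw 42;  // stand-in for NullPointerException
  return p->field;
}

// What the compiler emits once a suitable memory op is found nearby: the
// load at a known PC doubles as the null check; a null p raises a hardware
// fault that the VM translates back into the exception.
int load_with_implicit_check(Obj* p) {
  return p->field;  // faulting PC is recorded in a side table by the VM
}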
@ -578,7 +578,7 @@ class PhaseCFG : public Phase {
// removes the mapping from a node to a block
void unmap_node_from_block(const Node* node) {
_node_to_block_mapping.map(node->_idx, NULL);
_node_to_block_mapping.map(node->_idx, nullptr);
}
// get the block in which this node resides
@ -588,7 +588,7 @@ class PhaseCFG : public Phase {
// does this node reside in a block; return true
bool has_block(const Node* node) const {
return (_node_to_block_mapping.lookup(node->_idx) != NULL);
return (_node_to_block_mapping.lookup(node->_idx) != nullptr);
}
// Use frequency calculations and code shape to predict if the block
@ -691,7 +691,7 @@ protected:
Block* _target; // block target
double _prob; // probability of edge to block
public:
BlockProbPair() : _target(NULL), _prob(0.0) {}
BlockProbPair() : _target(nullptr), _prob(0.0) {}
BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}
Block* get_target() const { return _target; }
@ -716,9 +716,9 @@ class CFGLoop : public CFGElement {
CFGElement(),
_id(id),
_depth(0),
_parent(NULL),
_sibling(NULL),
_child(NULL),
_parent(nullptr),
_sibling(nullptr),
_child(nullptr),
_exit_prob(1.0f) {}
CFGLoop* parent() { return _parent; }
void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
@ -731,7 +731,7 @@ class CFGLoop : public CFGElement {
assert(hd->head()->is_Loop(), "must begin with loop head node");
return hd;
}
Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
Block* backedge_block(); // Return the block on the backedge of the loop (else null)
void compute_loop_depth(int depth);
void compute_freq(); // compute frequency with loop assuming head freq 1.0f
void scale_freq(); // scale frequency by loop trip count (including outer loops)
@ -817,8 +817,8 @@ class Trace : public ResourceObj {
void break_loop_after(Block *b) {
_last = b;
_first = next(b);
set_prev(_first, NULL);
set_next(_last, NULL);
set_prev(_first, nullptr);
set_next(_last, nullptr);
}
public:
@ -829,8 +829,8 @@ class Trace : public ResourceObj {
_prev_list(prev_list),
_first(b),
_last(b) {
set_next(b, NULL);
set_prev(b, NULL);
set_next(b, nullptr);
set_prev(b, nullptr);
};
// Return the id number
@ -849,7 +849,7 @@ class Trace : public ResourceObj {
// Insert a trace in the middle of this one after b
void insert_after(Block *b, Trace *tr) {
set_next(tr->last_block(), next(b));
if (next(b) != NULL) {
if (next(b) != nullptr) {
set_prev(next(b), tr->last_block());
}
@ -863,7 +863,7 @@ class Trace : public ResourceObj {
void insert_before(Block *b, Trace *tr) {
Block *p = prev(b);
assert(p != NULL, "use append instead");
assert(p != nullptr, "use append instead");
insert_after(p, tr);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@
// an array of structs, but the struct-of-arrays is generally a little more
// efficient). The arrays are indexed by register number (including
// stack-slots as registers) and so is bounded by 200 to 300 elements in
// practice. One array will map to a reaching def Node (or NULL for
// practice. One array will map to a reaching def Node (or null for
// conflict/dead). The other array will map to a callee-saved register or
// OptoReg::Bad for not-callee-saved.
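The layout trade-off mentioned above, in miniature: struct-of-arrays keeps one dense array per field, so a pass that scans a single field (as OopFlow::merge does) walks contiguous memory. Field names here are illustrative, not OopFlow's:

// Array-of-structs: each register's fields live together.
struct RegInfoAoS {
  short callee_save;
  void* reaching_def;
};
// RegInfoAoS aos[300];  // scanning callee_save strides over both fields

// Struct-of-arrays, as OopFlow is organized: one dense array per field.
struct RegInfoSoA {
  short callees[300];    // register -> callee-saved reg, or a "bad" marker
  void* defs[300];       // register -> reaching def, or nullptr for dead
};
// RegInfoSoA soa;       // merging only callees touches one dense array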
@ -80,16 +80,16 @@
struct OopFlow : public ArenaObj {
short *_callees; // Array mapping register to callee-saved
Node **_defs; // array mapping register to reaching def
// or NULL if dead/conflict
// or null if dead/conflict
// OopFlow structs, when not being actively modified, describe the _end_ of
// this block.
Block *_b; // Block for this struct
OopFlow *_next; // Next free OopFlow
// or NULL if dead/conflict
// or null if dead/conflict
Compile* C;
OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
_b(NULL), _next(NULL), C(c) { }
_b(nullptr), _next(nullptr), C(c) { }
// Given reaching-defs for this block start, compute it for this block end
void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );
@ -166,19 +166,19 @@ void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehas
// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
assert( _b == NULL, "merging into a happy flow" );
assert( _b == nullptr, "merging into a happy flow" );
assert( flow->_b, "this flow is still alive" );
assert( flow != this, "no self flow" );
// Do the merge. If there are any differences, drop to 'bottom' which
// is OptoReg::Bad or NULL depending.
// is OptoReg::Bad or null depending.
for( int i=0; i<max_reg; i++ ) {
// Merge the callee-save's
if( _callees[i] != flow->_callees[i] )
_callees[i] = OptoReg::Bad;
// Merge the reaching defs
if( _defs[i] != flow->_defs[i] )
_defs[i] = NULL;
_defs[i] = nullptr;
}
}
@ -214,7 +214,7 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
memset(dup_check,0,OptoReg::stack0()) );
OopMap *omap = new OopMap( framesize, max_inarg_slot );
MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : NULL;
MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : nullptr;
JVMState* jvms = n->jvms();
// For all registers do...
@ -468,7 +468,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first);
if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second);
MachNode *m = n->is_Mach() ? n->as_Mach() : NULL;
MachNode *m = n->is_Mach() ? n->as_Mach() : nullptr;
// Check if m is potentially a CISC alternate instruction (i.e, possibly
// synthesized by RegAlloc from a conventional instruction and a
@ -494,7 +494,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
// for this stack location, and set the appropriate bit in the
// live vector 4987749.
if (is_cisc_alternate && def == fp) {
const TypePtr *adr_type = NULL;
const TypePtr *adr_type = nullptr;
intptr_t offset;
const Node* base = m->get_base_and_disp(offset, adr_type);
if (base == NodeSentinel) {
@ -505,7 +505,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
} else if (base != fp || offset == Type::OffsetBot) {
// Do nothing: the fp operand is either not from a memory use
// (base == NULL) OR the fp is used in a non-memory context
// (base == nullptr) OR the fp is used in a non-memory context
// (base is some other register) OR the offset is not constant,
// so it is not a stack slot.
} else {
@ -561,7 +561,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
Block* block = cfg->get_block(i);
uint j;
for (j = 1; j < block->number_of_nodes(); j++) {
if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == nullptr) {
break;
}
}
@ -596,12 +596,12 @@ void PhaseOutput::BuildOopMaps() {
Block_List worklist; // Worklist of pending blocks
int max_reg_ints = align_up(max_reg, BitsPerInt)>>LogBitsPerInt;
Dict *safehash = NULL; // Used for assert only
Dict *safehash = nullptr; // Used for assert only
// Compute a backwards liveness per register. Needs a bitarray of
// #blocks x (#registers, rounded up to ints)
safehash = new Dict(cmpkey,hashkey,A);
do_liveness( C->regalloc(), C->cfg(), &worklist, max_reg_ints, A, safehash );
OopFlow *free_list = NULL; // Free, unused
OopFlow *free_list = nullptr; // Free, unused
// Array mapping blocks to completed oopflows
OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, C->cfg()->number_of_blocks());
@ -645,7 +645,7 @@ void PhaseOutput::BuildOopMaps() {
// If this block has a visited predecessor AND that predecessor has this
// last block as his only undone child, we can move the OopFlow from the
// pred to this block. Otherwise we have to grab a new OopFlow.
OopFlow *flow = NULL; // Flag for finding optimized flow
OopFlow *flow = nullptr; // Flag for finding optimized flow
Block *pred = (Block*)((intptr_t)0xdeadbeef);
// Scan this block's preds to find a done predecessor
for (uint j = 1; j < b->num_preds(); j++) {
@ -679,9 +679,9 @@ void PhaseOutput::BuildOopMaps() {
if( !free_list )
free_list = OopFlow::make(A,max_reg,C);
flow = free_list;
assert( flow->_b == NULL, "oopFlow is not free" );
assert( flow->_b == nullptr, "oopFlow is not free" );
free_list = flow->_next;
flow->_next = NULL;
flow->_next = nullptr;
// Copy/clone over the data
flow->clone(flows[pred->_pre_order], max_reg);
@ -691,7 +691,7 @@ void PhaseOutput::BuildOopMaps() {
// because after the first time they are guarded from entering
// this code again.
assert( flow->_b == pred, "have some prior flow" );
flow->_b = NULL;
flow->_b = nullptr;
// Now push flow forward
flows[b->_pre_order] = flow;// Mark flow for this block


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,31 +44,31 @@ InlineTree::InlineTree(Compile* c,
JVMState* caller_jvms, int caller_bci,
int max_inline_level) :
C(c),
_caller_jvms(NULL),
_caller_jvms(nullptr),
_method(callee),
_late_inline(false),
_caller_tree((InlineTree*) caller_tree),
_count_inline_bcs(method()->code_size_for_inlining()),
_max_inline_level(max_inline_level),
_subtrees(c->comp_arena(), 2, 0, NULL),
_msg(NULL)
_subtrees(c->comp_arena(), 2, 0, nullptr),
_msg(nullptr)
{
#ifndef PRODUCT
_count_inlines = 0;
_forced_inline = false;
#endif
if (caller_jvms != NULL) {
if (caller_jvms != nullptr) {
// Keep a private copy of the caller_jvms:
_caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());
_caller_jvms->set_bci(caller_jvms->bci());
assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
}
assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert((caller_tree == nullptr ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
// Update hierarchical counts, count_inline_bcs() and count_inlines()
InlineTree *caller = (InlineTree *)caller_tree;
for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
for( ; caller != nullptr; caller = ((InlineTree *)(caller->caller_tree())) ) {
caller->_count_inline_bcs += count_inline_bcs();
NOT_PRODUCT(caller->_count_inlines++;)
}
@ -198,7 +198,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// negative filter: should callee NOT be inlined?
bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, bool& should_delay, ciCallProfile& profile) {
const char* fail_msg = NULL;
const char* fail_msg = nullptr;
// First check all inlining restrictions which are required for correctness
if (callee_method->is_abstract()) {
@ -221,11 +221,11 @@ bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_met
}
// one more inlining restriction
if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
if (fail_msg == nullptr && callee_method->has_unloaded_classes_in_signature()) {
fail_msg = "unloaded signature classes";
}
if (fail_msg != NULL) {
if (fail_msg != nullptr) {
set_msg(fail_msg);
return true;
}
@ -281,10 +281,10 @@ bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_met
// don't inline exception code unless the top method belongs to an
// exception class
if (caller_tree() != NULL &&
if (caller_tree() != nullptr &&
callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
const InlineTree *top = this;
while (top->caller_tree() != NULL) top = top->caller_tree();
while (top->caller_tree() != nullptr) top = top->caller_tree();
ciInstanceKlass* k = top->method()->holder();
if (!k->is_subclass_of(C->env()->Throwable_klass())) {
set_msg("exception method");
@ -447,8 +447,8 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
}
}
// count callers of current method and callee
Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : nullptr;
for (JVMState* j = jvms->caller(); j != nullptr && j->has_method(); j = j->caller()) {
if (j->method() == callee_method) {
if (is_compiled_lambda_form) {
// Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
@ -487,7 +487,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
//------------------------------pass_initial_checks----------------------------
bool InlineTree::pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method) {
// Check if a callee_method was suggested
if (callee_method == NULL) {
if (callee_method == nullptr) {
return false;
}
ciInstanceKlass *callee_holder = callee_method->holder();
@ -529,15 +529,15 @@ const char* InlineTree::check_can_parse(ciMethod* callee) {
if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)";
if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)";
if (!callee->can_be_parsed()) return "cannot be parsed";
return NULL;
return nullptr;
}
//------------------------------print_inlining---------------------------------
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
ciMethod* caller_method, bool success) const {
const char* inline_msg = msg();
assert(inline_msg != NULL, "just checking");
if (C->log() != NULL) {
assert(inline_msg != nullptr, "just checking");
if (C->log() != nullptr) {
if (success) {
C->log()->inline_success(inline_msg);
} else {
@ -548,10 +548,10 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
caller_bci, inline_msg);
if (C->print_inlining()) {
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
guarantee(callee_method != NULL, "would crash in CompilerEvent::InlineEvent::post");
guarantee(callee_method != nullptr, "would crash in CompilerEvent::InlineEvent::post");
if (Verbose) {
const InlineTree *top = this;
while (top->caller_tree() != NULL) { top = top->caller_tree(); }
while (top->caller_tree() != nullptr) { top = top->caller_tree(); }
//tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
}
}
@ -565,11 +565,11 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile,
bool& should_delay) {
#ifdef ASSERT
assert(callee_method != NULL, "caller checks for optimized virtual!");
assert(callee_method != nullptr, "caller checks for optimized virtual!");
// Make sure the incoming jvms has the same information content as me.
// This means that we can eventually make this whole class AllStatic.
if (jvms->caller() == NULL) {
assert(_caller_jvms == NULL, "redundant instance state");
if (jvms->caller() == nullptr) {
assert(_caller_jvms == nullptr, "redundant instance state");
} else {
assert(_caller_jvms->same_calls_as(jvms->caller()), "redundant instance state");
}
@ -587,7 +587,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro
// Do some parse checks.
set_msg(check_can_parse(callee_method));
if (msg() != NULL) {
if (msg() != nullptr) {
print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
return false;
}
@ -597,7 +597,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro
should_delay); // out
if (success) {
// Inline!
if (msg() == NULL) {
if (msg() == nullptr) {
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, caller_method, true /* success */);
@ -609,7 +609,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro
return true;
} else {
// Do not inline
if (msg() == NULL) {
if (msg() == nullptr) {
set_msg("too cold to inline");
}
print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
@ -621,11 +621,11 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro
InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, JVMState* caller_jvms, int caller_bci) {
// Attempt inlining.
InlineTree* old_ilt = callee_at(caller_bci, callee_method);
if (old_ilt != NULL) {
if (old_ilt != nullptr) {
return old_ilt;
}
int max_inline_level_adjust = 0;
if (caller_jvms->method() != NULL) {
if (caller_jvms->method() != nullptr) {
if (caller_jvms->method()->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames
} else if (callee_method->is_method_handle_intrinsic() ||
@ -660,7 +660,7 @@ InlineTree *InlineTree::callee_at(int bci, ciMethod* callee) const {
return sub;
}
}
return NULL;
return nullptr;
}
@ -669,7 +669,7 @@ InlineTree *InlineTree::build_inline_tree_root() {
Compile* C = Compile::current();
// Root of inline tree
InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, MaxInlineLevel);
InlineTree* ilt = new InlineTree(C, nullptr, C->method(), nullptr, -1, MaxInlineLevel);
return ilt;
}
@ -688,11 +688,11 @@ InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms,
assert(jvmsp->method() == iltp->method(), "tree still in sync");
ciMethod* d_callee = (d == depth) ? callee : jvms->of_depth(d+1)->method();
InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee);
if (sub == NULL) {
if (sub == nullptr) {
if (d == depth) {
sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci());
}
guarantee(sub != NULL, "should be a sub-ilt here");
guarantee(sub != nullptr, "should be a sub-ilt here");
return sub;
}
iltp = sub;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#include "opto/output.hpp"
C2CodeStubList::C2CodeStubList() :
_stubs(Compile::current()->comp_arena(), 2, 0, NULL) {}
_stubs(Compile::current()->comp_arena(), 2, 0, nullptr) {}
void C2CodeStubList::emit(CodeBuffer& cb) {
C2_MacroAssembler masm(&cb);
@ -39,7 +39,7 @@ void C2CodeStubList::emit(CodeBuffer& cb) {
C2CodeStub* stub = _stubs.at(i);
int max_size = stub->max_size();
// Make sure there is enough space in the code buffer
if (cb.insts()->maybe_expand_to_ensure_remaining(max_size) && cb.blob() == NULL) {
if (cb.insts()->maybe_expand_to_ensure_remaining(max_size) && cb.blob() == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
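
The loop above reserves worst-case space before emitting each stub and turns a failed buffer expansion into a clean compilation bailout instead of an overrun. A self-contained sketch of that reserve-then-emit discipline (Buffer and Stub are illustrative, not the HotSpot types):

#include <cstddef>
#include <vector>

struct Buffer {
  std::size_t capacity = 1024;
  std::size_t used = 0;
  // Stand-in for maybe_expand_to_ensure_remaining(): false models the case
  // where expansion fails and blob() comes back null.
  bool ensure_remaining(std::size_t n) { return used + n <= capacity; }
};

struct Stub {
  std::size_t max_size() const { return 32; }     // worst-case footprint
  void emit(Buffer& b) { b.used += max_size(); }  // pretend to write code
};

// Emit every stub, or bail out cleanly at the first capacity failure
// (the moment the code above would record "CodeCache is full").
bool emit_all(Buffer& buf, std::vector<Stub>& stubs) {
  for (Stub& s : stubs) {
    if (!buf.ensure_remaining(s.max_size())) return false;
    s.emit(buf);
  }
  return true;
}

int main() {
  Buffer buf;
  std::vector<Stub> stubs(4);
  return emit_all(buf, stubs) ? 0 : 1;
}
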


@ -385,7 +385,7 @@
notproduct(ccstr, PrintIdealGraphAddress, "127.0.0.1", \
"IP address to connect to visualizer") \
\
notproduct(ccstr, PrintIdealGraphFile, NULL, \
notproduct(ccstr, PrintIdealGraphFile, nullptr, \
"File to dump ideal graph to. If set overrides the " \
"use of the network") \
\


@ -113,7 +113,7 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, boo
Compile C(env, target, entry_bci, options, directive);
// Check result and retry if appropriate.
if (C.failure_reason() != NULL) {
if (C.failure_reason() != nullptr) {
if (C.failure_reason_is(retry_class_loading_during_parsing())) {
env->report_failure(C.failure_reason());
continue; // retry
@ -229,7 +229,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
if (!Matcher::match_rule_supported(Op_AryEq)) return false;
break;
case vmIntrinsics::_copyMemory:
if (StubRoutines::unsafe_arraycopy() == NULL) return false;
if (StubRoutines::unsafe_arraycopy() == nullptr) return false;
break;
case vmIntrinsics::_encodeAsciiArray:
if (!Matcher::match_rule_supported(Op_EncodeISOArray) || !Matcher::supports_encode_ascii_array) return false;
@ -483,7 +483,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
if (!Matcher::match_rule_supported(Op_UMulHiL)) return false;
break;
case vmIntrinsics::_getCallerClass:
if (vmClasses::reflect_CallerSensitive_klass() == NULL) return false;
if (vmClasses::reflect_CallerSensitive_klass() == nullptr) return false;
break;
case vmIntrinsics::_onSpinWait:
if (!Matcher::match_rule_supported(Op_OnSpinWait)) return false;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,7 @@ public:
{
_is_osr = is_osr;
_expected_uses = expected_uses;
assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
}
virtual bool is_parse() const { return true; }
@ -93,7 +93,7 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
}
if (C->failing()) {
return NULL; // bailing out of the compile; do not try to parse
return nullptr; // bailing out of the compile; do not try to parse
}
Parse parser(jvms, method(), _expected_uses);
@ -101,8 +101,8 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
GraphKit& exits = parser.exits();
if (C->failing()) {
while (exits.pop_exception_state() != NULL) ;
return NULL;
while (exits.pop_exception_state() != nullptr) ;
return nullptr;
}
assert(exits.jvms()->same_calls_as(jvms), "sanity");
@ -147,7 +147,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
: SharedRuntime::get_resolve_opt_virtual_call_stub();
if (kit.C->log() != NULL) {
if (kit.C->log() != nullptr) {
kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
}
@ -195,7 +195,7 @@ protected:
public:
VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
: CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
: CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
{
assert(vtable_index == Method::invalid_vtable_index ||
vtable_index >= 0, "either invalid or usable");
@ -219,7 +219,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
kit.C->print_inlining_update(this);
if (kit.C->log() != NULL) {
if (kit.C->log() != nullptr) {
kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
}
@ -235,7 +235,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
kit.inc_sp(arg_size); // restore arguments
kit.uncommon_trap(Deoptimization::Reason_null_check,
Deoptimization::Action_none,
NULL, "null receiver");
nullptr, "null receiver");
return kit.transfer_exceptions_into_jvms();
}
@ -244,7 +244,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
// However currently the conversion to implicit null checks in
// Block::implicit_null_check() only looks for loads and stores, not calls.
ciMethod *caller = kit.method();
ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
((ImplicitNullCheckThreshold > 0) && caller_md &&
(caller_md->trap_count(Deoptimization::Reason_null_check)
@ -288,7 +288,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
}
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
if (InlineTree::check_can_parse(m) != NULL) return NULL;
if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
return new ParseGenerator(m, expected_uses);
}
@ -296,7 +296,7 @@ CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
if (InlineTree::check_can_parse(m) != NULL) return NULL;
if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
float past_uses = m->interpreter_invocation_count();
float expected_uses = past_uses;
return new ParseGenerator(m, expected_uses, true);
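
Note the inverted null convention here compared to generate(): check_can_parse yields a failure message, so a nullptr result means the method is parseable, and both factories bail out with nullptr as soon as a message comes back. A minimal stand-in for that convention (the checks shown are illustrative, not the real ones):

const char* check_can_parse_like(bool is_native, bool has_code) {
  if (is_native) return "native method"; // illustrative failure message
  if (!has_code) return "no bytecodes";  // illustrative failure message
  return nullptr;                        // nullptr here means: parseable
}
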
@ -388,7 +388,7 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {
public:
LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}
LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
virtual bool is_mh_late_inline() const { return true; }
@ -429,7 +429,7 @@ bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms)
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
if (cg != NULL) {
if (cg != nullptr) {
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
_inline_cg = cg;
C->dec_number_of_mh_late_inlines();
@ -466,7 +466,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
public:
LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
: VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
_unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {
_unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
assert(IncrementalInlineVirtual, "required");
}
@ -478,7 +478,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
virtual void do_late_inline();
virtual void set_callee_method(ciMethod* m) {
assert(_callee == NULL, "repeated inlining attempt");
assert(_callee == nullptr, "repeated inlining attempt");
_callee = m;
}
@ -488,7 +488,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
// through and exceptional uses of the memory and io projections
// as is done for allocations and macro expansion.
JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
if (call_node() != NULL) {
if (call_node() != nullptr) {
call_node()->set_generator(this);
}
return new_jvms;
@ -548,10 +548,10 @@ bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState*
jvms,
allow_inline,
_prof_factor,
NULL /*speculative_receiver_type*/,
nullptr /*speculative_receiver_type*/,
true /*allow_intrinsics*/);
if (cg != NULL) {
if (cg != nullptr) {
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
_inline_cg = cg;
return true;
@ -578,7 +578,7 @@ void LateInlineMHCallGenerator::do_late_inline() {
}
void LateInlineVirtualCallGenerator::do_late_inline() {
assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
CallGenerator::do_late_inline_helper();
}
@ -587,8 +587,8 @@ void CallGenerator::do_late_inline_helper() {
// Can't inline it
CallNode* call = call_node();
if (call == NULL || call->outcnt() == 0 ||
call->in(0) == NULL || call->in(0)->is_top()) {
if (call == nullptr || call->outcnt() == 0 ||
call->in(0) == nullptr || call->in(0)->is_top()) {
return;
}
@ -620,8 +620,8 @@ void CallGenerator::do_late_inline_helper() {
(callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
(callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
(callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
(callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
(callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
(callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
(callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) {
return;
}
@ -633,7 +633,7 @@ void CallGenerator::do_late_inline_helper() {
// The call is marked as pure (no important side effects), but result isn't used.
// It's safe to remove the call.
bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
if (is_pure_call() && result_not_used) {
GraphKit kit(call->jvms());
@ -685,7 +685,7 @@ void CallGenerator::do_late_inline_helper() {
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->node_notes_at(call->_idx);
if (old_nn != NULL) {
if (old_nn != nullptr) {
Node_Notes* entry_nn = old_nn->clone(C);
entry_nn->set_jvms(jvms);
C->set_default_node_notes(entry_nn);
@ -693,7 +693,7 @@ void CallGenerator::do_late_inline_helper() {
// Now perform the inlining using the synthesized JVMState
JVMState* new_jvms = inline_cg()->generate(jvms);
if (new_jvms == NULL) return; // no change
if (new_jvms == nullptr) return; // no change
if (C->failing()) return;
// Capture any exceptional control flow
@ -862,7 +862,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
// We share a map with the caller, so his JVMS gets adjusted.
Node* receiver = kit.argument(0);
CompileLog* log = kit.C->log();
if (log != NULL) {
if (log != nullptr) {
log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
}
@ -877,7 +877,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
replaced_nodes.clone();
Node* casted_receiver = receiver; // will get updated in place...
Node* slow_ctl = NULL;
Node* slow_ctl = nullptr;
if (_exact_check) {
slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
&casted_receiver);
@ -886,15 +886,15 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
&casted_receiver);
}
SafePointNode* slow_map = NULL;
JVMState* slow_jvms = NULL;
SafePointNode* slow_map = nullptr;
JVMState* slow_jvms = nullptr;
{ PreserveJVMState pjvms(&kit);
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _if_missed->generate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
return nullptr; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != nullptr, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())
@ -913,7 +913,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
// Make the hot call:
JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
if (new_jvms == NULL) {
if (new_jvms == nullptr) {
// Inline failed, so make a direct call.
assert(_if_hit->is_inline(), "must have been a failed inline");
CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
@ -923,7 +923,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
kit.set_jvms(new_jvms);
// Need to merge slow and fast?
if (slow_map == NULL) {
if (slow_map == nullptr) {
// The fast path is the only path remaining.
return kit.transfer_exceptions_into_jvms();
}
@ -983,7 +983,7 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
bool input_not_const;
CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
Compile* C = Compile::current();
if (cg != NULL) {
if (cg != nullptr) {
if (AlwaysIncrementalInline) {
return CallGenerator::for_late_inline(callee, cg);
} else {
@ -1020,14 +1020,14 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
if (receiver->Opcode() == Op_ConP) {
input_not_const = false;
const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
if (recv_toop != NULL) {
if (recv_toop != nullptr) {
ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
const int vtable_index = Method::invalid_vtable_index;
if (!ciMethod::is_consistent_info(callee, target)) {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"signatures mismatch");
return NULL;
return nullptr;
}
CallGenerator *cg = C->call_generator(target, vtable_index,
@ -1064,7 +1064,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
if (!ciMethod::is_consistent_info(callee, target)) {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"signatures mismatch");
return NULL;
return nullptr;
}
// In lambda forms we erase signature types to avoid resolving issues
@ -1078,7 +1078,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
Node* arg = kit.argument(0);
const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
kit.set_argument(0, cast_obj);
@ -1091,7 +1091,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
Node* arg = kit.argument(receiver_skip + j);
const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
kit.set_argument(receiver_skip + j, cast_obj);
@ -1106,7 +1106,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
int vtable_index = Method::invalid_vtable_index;
bool call_does_dispatch = false;
ciKlass* speculative_receiver_type = NULL;
ciKlass* speculative_receiver_type = nullptr;
if (is_virtual_or_interface) {
ciInstanceKlass* klass = target->holder();
Node* receiver_node = kit.argument(0);
@ -1121,7 +1121,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
false /* check_access */);
// We lack profiling at this call but type speculation may
// provide us with a type
speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
allow_inline,
@ -1144,7 +1144,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
break;
}
return NULL;
return nullptr;
}
//------------------------PredicatedIntrinsicGenerator------------------------------
@ -1178,7 +1178,7 @@ CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
// The code we want to generate here is:
// if (receiver == NULL)
// if (receiver == nullptr)
// uncommon_Trap
// if (predicate(0))
// do_intrinsic(0)
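
The comment sketches a guard-then-dispatch shape: trap on a null receiver, try each predicate in order, run the matching intrinsic version, and fall through to the ordinary call when none fires. The same shape as straight-line C++ rather than Ideal nodes (all helpers are illustrative placeholders):

#include <cstdio>

static int  uncommon_trap()     { std::puts("trap");         return -1; }
static bool predicate(int i)    { return i == 1; }           // stand-in check
static int  do_intrinsic(int i) { std::printf("intrinsic %d\n", i); return i; }
static int  do_regular_call()   { std::puts("regular call"); return 0; }

static int dispatch(const void* receiver, int n_predicates) {
  if (receiver == nullptr) return uncommon_trap(); // null receiver: deoptimize
  for (int i = 0; i < n_predicates; i++) {
    if (predicate(i)) return do_intrinsic(i);      // first matching version wins
  }
  return do_regular_call();                        // fall through to normal call
}

int main() {
  int x = 0;
  dispatch(&x, 2);      // predicate(1) fires: intrinsic path
  dispatch(nullptr, 2); // null receiver: trap path
}
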
@ -1193,7 +1193,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
PhaseGVN& gvn = kit.gvn();
CompileLog* log = kit.C->log();
if (log != NULL) {
if (log != nullptr) {
log->elem("predicated_intrinsic bci='%d' method='%d'",
jvms->bci(), log->identify(method()));
}
@ -1237,7 +1237,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
PreserveJVMState pjvms(&kit);
// Generate intrinsic code:
JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
if (new_jvms == NULL) {
if (new_jvms == nullptr) {
// Intrinsic failed, use normal compilation path for this predicate.
slow_region->add_req(kit.control());
} else {
@ -1248,7 +1248,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
}
}
}
if (else_ctrl == NULL) {
if (else_ctrl == nullptr) {
else_ctrl = kit.C->top();
}
kit.set_control(else_ctrl);
@ -1263,8 +1263,8 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
kit.set_control(gvn.transform(slow_region));
JVMState* new_jvms = _cg->generate(kit.sync_jvms());
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(new_jvms != NULL, "must be");
return nullptr; // might happen because of NodeCountInliningCutoff
assert(new_jvms != nullptr, "must be");
kit.add_exception_states_from(new_jvms);
kit.set_jvms(new_jvms);
if (!kit.stopped()) {
@ -1327,7 +1327,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
for (int j = 1; j < results; j++) {
JVMState* jvms = result_jvms[j];
Node* jmap = jvms->map();
Node* m = NULL;
Node* m = nullptr;
if (jmap->req() > i) {
m = jmap->in(i);
if (m != n) {
@ -1397,7 +1397,7 @@ JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
// of a class cast failure for a monomorphic call as it will never let us convert
// the call to either bi-morphic or megamorphic and can lead to unc-trap loops
bool keep_exact_action = true;
kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
} else {
kit.uncommon_trap(_reason, _action);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,9 +44,9 @@ class CallGenerator : public ArenaObj {
void do_late_inline_helper();
virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { ShouldNotReachHere(); return false; }
virtual CallGenerator* inline_cg() const { ShouldNotReachHere(); return NULL; }
virtual bool is_pure_call() const { ShouldNotReachHere(); return false; }
virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { ShouldNotReachHere(); return false; }
virtual CallGenerator* inline_cg() const { ShouldNotReachHere(); return nullptr; }
virtual bool is_pure_call() const { ShouldNotReachHere(); return false; }
public:
// Accessors
@ -80,7 +80,7 @@ class CallGenerator : public ArenaObj {
// Replace the call with an inline version of the code
virtual void do_late_inline() { ShouldNotReachHere(); }
virtual CallNode* call_node() const { return NULL; }
virtual CallNode* call_node() const { return nullptr; }
virtual CallGenerator* with_call_node(CallNode* call) { return this; }
virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); };
@ -119,7 +119,7 @@ class CallGenerator : public ArenaObj {
// If the call traps, the returned map must have a control edge of top.
// If the call can throw, the returned map must report has_exceptions().
//
// If the result is NULL, it means that this CallGenerator was unable
// If the result is null, it means that this CallGenerator was unable
// to handle the given call, and another CallGenerator should be consulted.
virtual JVMState* generate(JVMState* jvms) = 0;
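
The contract spelled out above makes every call generator one link in a chain of responsibility: returning null tells the caller to consult the next candidate. A minimal sketch of that convention (Generator and State are illustrative stand-ins, not the C2 classes):

#include <vector>

struct State { int bci = 0; };

struct Generator {
  virtual ~Generator() = default;
  // Returns nullptr when this generator cannot handle the call site,
  // telling the caller to consult the next candidate.
  virtual State* generate(State& s) = 0;
};

State* try_generators(const std::vector<Generator*>& gens, State& s) {
  for (Generator* g : gens) {
    if (State* result = g->generate(s)) return result; // first non-null wins
  }
  return nullptr; // no generator could handle the call
}
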
@ -169,7 +169,7 @@ class CallGenerator : public ArenaObj {
static void register_intrinsic(ciMethod* m, CallGenerator* cg);
static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic,
CallGenerator* cg);
virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; };
virtual Node* generate_predicate(JVMState* jvms, int predicate) { return nullptr; };
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------calling_convention-----------------------------
@ -99,7 +99,7 @@ Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
return new MachProjNode(this,proj->_con,rm,ideal_reg);
}
}
return NULL;
return nullptr;
}
//------------------------------StartOSRNode----------------------------------
@ -169,7 +169,7 @@ ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *f
}
Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
const Type* ReturnNode::Value(PhaseGVN* phase) const {
@ -219,7 +219,7 @@ RethrowNode::RethrowNode(
}
Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
const Type* RethrowNode::Value(PhaseGVN* phase) const {
@ -264,13 +264,13 @@ uint TailJumpNode::match_edge(uint idx) const {
//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
_method(method) {
assert(method != NULL, "must be valid call site");
assert(method != nullptr, "must be valid call site");
_bci = InvocationEntryBci;
_reexecute = Reexecute_Undefined;
debug_only(_bci = -99); // random garbage value
debug_only(_map = (SafePointNode*)-1);
_caller = caller;
_depth = 1 + (caller == NULL ? 0 : caller->depth());
_depth = 1 + (caller == nullptr ? 0 : caller->depth());
_locoff = TypeFunc::Parms;
_stkoff = _locoff + _method->max_locals();
_monoff = _stkoff + _method->max_stack();
@ -279,11 +279,11 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) :
_sp = 0;
}
JVMState::JVMState(int stack_size) :
_method(NULL) {
_method(nullptr) {
_bci = InvocationEntryBci;
_reexecute = Reexecute_Undefined;
debug_only(_map = (SafePointNode*)-1);
_caller = NULL;
_caller = nullptr;
_depth = 1;
_locoff = TypeFunc::Parms;
_stkoff = _locoff;
@ -312,13 +312,13 @@ bool JVMState::same_calls_as(const JVMState* that) const {
const JVMState* q = that;
for (;;) {
if (p->_method != q->_method) return false;
if (p->_method == NULL) return true; // bci is irrelevant
if (p->_method == nullptr) return true; // bci is irrelevant
if (p->_bci != q->_bci) return false;
if (p->_reexecute != q->_reexecute) return false;
p = p->caller();
q = q->caller();
if (p == q) return true;
assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end");
}
}
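
same_calls_as walks both caller chains in lock step, succeeding early when they join at a shared tail and failing on the first mismatching frame. The identical shape on a plain linked list (Frame is an illustrative stand-in for JVMState):

struct Frame {
  const void* method;  // identity of the executing method
  int bci;             // bytecode index within it
  const Frame* caller; // next frame up; nullptr at the root
};

// True when both chains describe the same stack of call sites.
bool same_chain(const Frame* p, const Frame* q) {
  for (;;) {
    if (p == q) return true; // shared tail (or both roots reached)
    if (p == nullptr || q == nullptr) return false;
    if (p->method != q->method || p->bci != q->bci) return false;
    p = p->caller;
    q = q->caller;
  }
}
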
@ -339,7 +339,7 @@ uint JVMState::debug_end() const {
//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
uint total = 0;
for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) {
total += jvmp->debug_size();
}
return total;
@ -351,7 +351,7 @@ uint JVMState::debug_depth() const {
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not. If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
if (n == NULL) { st->print(" NULL"); return; }
if (n == nullptr) { st->print(" null"); return; }
if (n->is_SafePointScalarObject()) {
// Scalar replacement.
SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
@ -374,7 +374,7 @@ static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, c
break;
case Type::AnyPtr:
assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
st->print(" %s%d]=#NULL",msg,i);
st->print(" %s%d]=#null",msg,i);
break;
case Type::AryPtr:
case Type::InstPtr:
@ -477,7 +477,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
ciInstanceKlass *iklass = NULL;
ciInstanceKlass *iklass = nullptr;
if (cik->is_instance_klass()) {
cik->print_name_on(st);
iklass = cik->as_instance_klass();
@ -505,7 +505,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
uint first_ind = spobj->first_index(mcall->jvms());
Node* fld_node = mcall->in(first_ind);
ciField* cifield;
if (iklass != NULL) {
if (iklass != nullptr) {
st->print(" [");
cifield = iklass->nonstatic_field_at(0);
cifield->print_name_on(st);
@ -515,7 +515,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
}
for (uint j = 1; j < nf; j++) {
fld_node = mcall->in(first_ind+j);
if (iklass != NULL) {
if (iklass != nullptr) {
st->print(", [");
cifield = iklass->nonstatic_field_at(j);
cifield->print_name_on(st);
@ -529,12 +529,12 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
}
}
st->cr();
if (caller() != NULL) caller()->format(regalloc, n, st);
if (caller() != nullptr) caller()->format(regalloc, n, st);
}
void JVMState::dump_spec(outputStream *st) const {
if (_method != NULL) {
if (_method != nullptr) {
bool printed = false;
if (!Verbose) {
// The JVMS dumps make really, really long lines.
@ -546,8 +546,8 @@ void JVMState::dump_spec(outputStream *st) const {
const char* name = namest.base();
if (name[0] == ' ') ++name;
const char* endcn = strchr(name, ':'); // end of class name
if (endcn == NULL) endcn = strchr(name, '(');
if (endcn == NULL) endcn = name + strlen(name);
if (endcn == nullptr) endcn = strchr(name, '(');
if (endcn == nullptr) endcn = name + strlen(name);
while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
--endcn;
st->print(" %s", endcn);
@ -560,30 +560,30 @@ void JVMState::dump_spec(outputStream *st) const {
} else {
st->print(" runtime stub");
}
if (caller() != NULL) caller()->dump_spec(st);
if (caller() != nullptr) caller()->dump_spec(st);
}
void JVMState::dump_on(outputStream* st) const {
bool print_map = _map && !((uintptr_t)_map & 1) &&
((caller() == NULL) || (caller()->map() != _map));
((caller() == nullptr) || (caller()->map() != _map));
if (print_map) {
if (_map->len() > _map->req()) { // _map->has_exceptions()
Node* ex = _map->in(_map->req()); // _map->next_exception()
// skip the first one; it's already being printed
while (ex != NULL && ex->len() > ex->req()) {
while (ex != nullptr && ex->len() > ex->req()) {
ex = ex->in(ex->req()); // ex->next_exception()
ex->dump(1);
}
}
_map->dump(Verbose ? 2 : 1);
}
if (caller() != NULL) {
if (caller() != nullptr) {
caller()->dump_on(st);
}
st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
if (_method == NULL) {
if (_method == nullptr) {
st->print_cr("(none)");
} else {
_method->print_name(st);
@ -620,7 +620,7 @@ JVMState* JVMState::clone_shallow(Compile* C) const {
//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
JVMState* n = clone_shallow(C);
for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) {
p->_caller = p->_caller->clone_shallow(C);
}
assert(n->depth() == depth(), "sanity");
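
clone_deep makes the whole caller chain private by cloning the head shallowly and then re-pointing each _caller at a fresh shallow copy as it walks down. The same pattern on a minimal list (Frame here is again just an illustrative stand-in):

struct Frame {
  int bci = 0;
  Frame* caller = nullptr;
  Frame* clone_shallow() const { return new Frame(*this); } // copy one link
};

// Copy an entire chain so the clone can be mutated independently.
Frame* clone_deep(const Frame* f) {
  Frame* n = f->clone_shallow();
  for (Frame* p = n; p->caller != nullptr; p = p->caller) {
    p->caller = p->caller->clone_shallow(); // re-point at a private copy
  }
  return n;
}
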
@ -632,7 +632,7 @@ JVMState* JVMState::clone_deep(Compile* C) const {
* Reset map for all callers
*/
void JVMState::set_map_deep(SafePointNode* map) {
for (JVMState* p = this; p != NULL; p = p->_caller) {
for (JVMState* p = this; p != nullptr; p = p->_caller) {
p->set_map(map);
}
}
@ -646,7 +646,7 @@ void JVMState::bind_map(SafePointNode* map) {
// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) {
jvms->set_locoff(jvms->locoff() + delta);
jvms->set_stkoff(jvms->stkoff() + delta);
jvms->set_monoff(jvms->monoff() + delta);
@ -665,7 +665,7 @@ int JVMState::interpreter_frame_size() const {
int callee_locals = 0;
int extra_args = method()->max_stack() - stk_size();
while (jvms != NULL) {
while (jvms != nullptr) {
int locks = jvms->nof_monitors();
int temps = jvms->stk_size();
bool is_top_frame = (jvms == this);
@ -710,9 +710,9 @@ void CallNode::dump_req(outputStream *st, DumpConfig* dc) const {
void CallNode::dump_spec(outputStream *st) const {
st->print(" ");
if (tf() != NULL) tf()->dump_on(st);
if (tf() != nullptr) tf()->dump_on(st);
if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
if (jvms() != NULL) jvms()->dump_spec(st);
if (jvms() != nullptr) jvms()->dump_spec(st);
}
#endif
@ -774,7 +774,7 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
default:
ShouldNotReachHere();
}
return NULL;
return nullptr;
}
// Do we Match on this edge index or not? Match no edges
@ -787,10 +787,10 @@ uint CallNode::match_edge(uint idx) const {
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
assert((t_oop != NULL), "sanity");
assert((t_oop != nullptr), "sanity");
if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
const TypeTuple* args = _tf->domain();
Node* dest = NULL;
Node* dest = nullptr;
// Stubs that can be called once an ArrayCopyNode is expanded have
// different signatures. Look for the second pointer argument,
// that is the destination of the copy.
@ -803,7 +803,7 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
}
}
}
guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
guarantee(dest != nullptr, "Call had only one ptr in, broken IR!");
if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
return true;
}
@ -819,29 +819,29 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
// Skip unrelated boxing methods.
Node* proj = proj_out_or_null(TypeFunc::Parms);
if ((proj == NULL) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) {
return false;
}
}
if (is_CallJava() && as_CallJava()->method() != NULL) {
if (is_CallJava() && as_CallJava()->method() != nullptr) {
ciMethod* meth = as_CallJava()->method();
if (meth->is_getter()) {
return false;
}
// May modify (by reflection) if a boxing object is passed
// as argument or returned.
Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
if (proj != NULL) {
Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : nullptr;
if (proj != nullptr) {
const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
}
}
const TypeTuple* d = tf()->domain();
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
if ((inst_t != nullptr) && (!inst_t->klass_is_exact() ||
(inst_t->instance_klass() == boxing_klass))) {
return true;
}
@ -866,18 +866,18 @@ bool CallNode::has_non_debug_use(Node *n) {
// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns NULL if there is none.
// or returns null if there is none.
Node *CallNode::result_cast() {
Node *cast = NULL;
Node *cast = nullptr;
Node *p = proj_out_or_null(TypeFunc::Parms);
if (p == NULL)
return NULL;
if (p == nullptr)
return nullptr;
for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
Node *use = p->fast_out(i);
if (use->is_CheckCastPP()) {
if (cast != NULL) {
if (cast != nullptr) {
return this; // more than 1 CheckCastPP
}
cast = use;
@ -896,15 +896,15 @@ Node *CallNode::result_cast() {
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
projs->fallthrough_proj = NULL;
projs->fallthrough_catchproj = NULL;
projs->fallthrough_ioproj = NULL;
projs->catchall_ioproj = NULL;
projs->catchall_catchproj = NULL;
projs->fallthrough_memproj = NULL;
projs->catchall_memproj = NULL;
projs->resproj = NULL;
projs->exobj = NULL;
projs->fallthrough_proj = nullptr;
projs->fallthrough_catchproj = nullptr;
projs->fallthrough_ioproj = nullptr;
projs->catchall_ioproj = nullptr;
projs->catchall_catchproj = nullptr;
projs->fallthrough_memproj = nullptr;
projs->catchall_memproj = nullptr;
projs->resproj = nullptr;
projs->exobj = nullptr;
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
ProjNode *pn = fast_out(i)->as_Proj();
@ -915,8 +915,8 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
// For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
projs->fallthrough_proj = pn;
const Node* cn = pn->unique_ctrl_out_or_null();
if (cn != NULL && cn->is_Catch()) {
ProjNode *cpn = NULL;
if (cn != nullptr && cn->is_Catch()) {
ProjNode *cpn = nullptr;
for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
cpn = cn->fast_out(k)->as_Proj();
assert(cpn->is_CatchProj(), "must be a CatchProjNode");
@ -938,7 +938,7 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
Node* e = pn->out(j);
if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
assert(projs->exobj == NULL, "only one");
assert(projs->exobj == nullptr, "only one");
projs->exobj = e;
}
}
@ -960,15 +960,15 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
// The resproj may not exist because the result could be ignored
// and the exception object may not exist if an exception handler
// swallows the exception but all the others must exist and be found.
assert(projs->fallthrough_proj != NULL, "must be found");
assert(projs->fallthrough_proj != nullptr, "must be found");
do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found");
assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found");
assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found");
assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found");
assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found");
if (separate_io_proj) {
assert(!do_asserts || projs->catchall_memproj != NULL, "must be found");
assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found");
assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found");
assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found");
}
}
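
extract_projections initializes every slot to nullptr, files each projection it recognizes, and asserts afterwards that the mandatory ones were found, while resproj and exobj may legitimately stay null. The same collect-then-check shape in miniature (Tag, Proj, and Projections are illustrative stand-ins):

#include <cassert>
#include <vector>

enum Tag { Control, Memory, IO, Result };
struct Proj { Tag tag; };

struct Projections {
  const Proj* ctrl = nullptr; // mandatory
  const Proj* mem  = nullptr; // mandatory
  const Proj* io   = nullptr; // mandatory
  const Proj* res  = nullptr; // may stay null: result can be ignored
};

Projections collect(const std::vector<Proj>& outs) {
  Projections p;
  for (const Proj& o : outs) {
    switch (o.tag) {
      case Control: p.ctrl = &o; break;
      case Memory:  p.mem  = &o; break;
      case IO:      p.io   = &o; break;
      case Result:  p.res  = &o; break;
    }
  }
  assert(p.ctrl != nullptr && p.mem != nullptr && p.io != nullptr);
  return p;
}
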
@ -976,7 +976,7 @@ Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
// Validate attached generator
CallGenerator* cg = generator();
if (cg != NULL) {
if (cg != nullptr) {
assert(is_CallStaticJava() && cg->is_mh_late_inline() ||
is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
}
@ -985,7 +985,7 @@ Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
}
bool CallNode::is_call_to_arraycopystub() const {
if (_name != NULL && strstr(_name, "arraycopy") != 0) {
if (_name != nullptr && strstr(_name, "arraycopy") != 0) {
return true;
}
return false;
@ -1013,7 +1013,7 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt
for (uint i = old_dbg_start; i < sfpt->req(); i++) {
Node* old_in = sfpt->in(i);
// Clone old SafePointScalarObjectNodes, adjusting their field contents.
if (old_in != NULL && old_in->is_SafePointScalarObject()) {
if (old_in != nullptr && old_in->is_SafePointScalarObject()) {
SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
bool new_node;
Node* new_in = old_sosn->clone(sosn_map, new_node);
@ -1027,8 +1027,8 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt
}
// JVMS may be shared so clone it before we modify it
set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr);
for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
jvms->set_map(this);
jvms->set_locoff(jvms->locoff()+jvms_adj);
jvms->set_stkoff(jvms->stkoff()+jvms_adj);
@ -1040,7 +1040,7 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt
#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
if (method() == NULL) {
if (method() == nullptr) {
return true; // call into runtime or uncommon trap
}
ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci());
@ -1077,7 +1077,7 @@ bool CallStaticJavaNode::cmp( const Node &n ) const {
Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
CallGenerator* cg = generator();
if (can_reshape && cg != NULL) {
if (can_reshape && cg != nullptr) {
assert(IncrementalInlineMH, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->is_mh_late_inline(), "not virtual");
@ -1088,7 +1088,7 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (iid == vmIntrinsics::_invokeBasic) {
if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
phase->C->prepend_late_inline(cg);
set_generator(NULL);
set_generator(nullptr);
}
} else if (iid == vmIntrinsics::_linkToNative) {
// never retry
@ -1096,7 +1096,7 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
assert(callee->has_member_arg(), "wrong type of call?");
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
phase->C->prepend_late_inline(cg);
set_generator(NULL);
set_generator(nullptr);
}
}
}
@ -1106,7 +1106,7 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
if (_name != nullptr && !strcmp(_name, "uncommon_trap")) {
return extract_uncommon_trap_request(this);
}
return 0;
@ -1114,7 +1114,7 @@ int CallStaticJavaNode::uncommon_trap_request() const {
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
if (!(call->req() > TypeFunc::Parms &&
call->in(TypeFunc::Parms) != NULL &&
call->in(TypeFunc::Parms) != nullptr &&
call->in(TypeFunc::Parms)->is_Con() &&
call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
assert(in_dump() != 0, "OK if dumping");
@ -1128,7 +1128,7 @@ int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
st->print("# Static ");
if (_name != NULL) {
if (_name != nullptr) {
st->print("%s", _name);
int trap_req = uncommon_trap_request();
if (trap_req != 0) {
@ -1162,7 +1162,7 @@ bool CallDynamicJavaNode::cmp( const Node &n ) const {
Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
CallGenerator* cg = generator();
if (can_reshape && cg != NULL) {
if (can_reshape && cg != nullptr) {
assert(IncrementalInlineVirtual, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->is_virtual_late_inline(), "not virtual");
@ -1195,7 +1195,7 @@ Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Register for late inlining.
cg->set_callee_method(callee);
phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
set_generator(NULL);
set_generator(nullptr);
}
}
return CallNode::Ideal(phase, can_reshape);
@ -1285,9 +1285,9 @@ bool SafePointNode::cmp( const Node &n ) const {
//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception");
if (len() == req()) {
if (n != NULL) add_prec(n);
if (n != nullptr) add_prec(n);
} else {
set_prec(req(), n);
}
@ -1297,10 +1297,10 @@ void SafePointNode::set_next_exception(SafePointNode* n) {
//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
if (len() == req()) {
return NULL;
return nullptr;
} else {
Node* n = in(req());
assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
return (SafePointNode*) n;
}
}
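
The pair above encodes an optional link in the slot just past a node's required inputs: len() == req() means no exception state is chained, otherwise in(req()) holds the next one. A compact stand-alone model of that encoding (Node below is illustrative, not the C2 class):

#include <cstddef>
#include <vector>

struct Node {
  std::size_t req;          // number of required inputs
  std::vector<Node*> edges; // required inputs, plus at most one extra slot

  explicit Node(std::size_t required) : req(required), edges(required, nullptr) {}

  Node* next_exception() const {
    return edges.size() == req ? nullptr : edges[req]; // extra slot, if present
  }
  void set_next_exception(Node* n) {
    if (edges.size() == req) {
      if (n != nullptr) edges.push_back(n); // grow only when there is a link
    } else {
      edges[req] = n;                       // overwrite the existing slot
    }
  }
};
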
@ -1309,8 +1309,8 @@ SafePointNode* SafePointNode::next_exception() const {
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
assert(_jvms == NULL || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
return remove_dead_region(phase, can_reshape) ? this : NULL;
assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState");
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------Identity---------------------------------------
@ -1322,7 +1322,7 @@ Node* SafePointNode::Identity(PhaseGVN* phase) {
Node* out_c = unique_ctrl_out_or_null();
// This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the
// outer loop's safepoint could confuse removal of the outer loop.
if (out_c != NULL && !out_c->is_OuterStripMinedLoopEnd()) {
if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) {
return in(TypeFunc::Control);
}
}
@ -1508,7 +1508,7 @@ uint SafePointScalarObjectNode::match_edge(uint idx) const {
SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
void* cached = (*sosn_map)[(void*)this];
if (cached != NULL) {
if (cached != nullptr) {
new_node = false;
return (SafePointScalarObjectNode*)cached;
}
@ -1533,7 +1533,7 @@ uint AllocateNode::size_of() const { return sizeof(*this); }
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
Node *ctrl, Node *mem, Node *abio,
Node *size, Node *klass_node, Node *initial_test)
: CallNode(atype, NULL, TypeRawPtr::BOTTOM)
: CallNode(atype, nullptr, TypeRawPtr::BOTTOM)
{
init_class_id(Class_Allocate);
init_flags(Flag_is_macro);
@ -1557,12 +1557,12 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
assert(initializer != NULL &&
assert(initializer != nullptr &&
initializer->is_initializer() &&
!initializer->is_static(),
"unexpected initializer method");
BCEscapeAnalyzer* analyzer = initializer->get_bcea();
if (analyzer == NULL) {
if (analyzer == nullptr) {
return;
}
@ -1572,7 +1572,7 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
}
}
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
Node* mark_node = NULL;
Node* mark_node = nullptr;
// For now only enable fast locking for non-array types
mark_node = phase->MakeConX(markWord::prototype().value());
return mark_node;
@ -1580,15 +1580,15 @@ Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, N
// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
// a CastII is appropriate, return null.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
Node *length = in(AllocateNode::ALength);
assert(length != NULL, "length is not null");
assert(length != nullptr, "length is not null");
const TypeInt* length_type = phase->find_int_type(length);
const TypeAryPtr* ary_type = oop_type->isa_aryptr();
if (ary_type != NULL && length_type != NULL) {
if (ary_type != nullptr && length_type != nullptr) {
const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
if (narrow_length_type != length_type) {
// Assert one of:
@ -1601,14 +1601,14 @@ Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTran
narrow_length_type->_lo >= length_type->_lo),
"narrow type must be narrower than length type");
// Return NULL if new nodes are not allowed
// Return null if new nodes are not allowed
if (!allow_new_nodes) {
return NULL;
return nullptr;
}
// Create a cast which is control dependent on the initialization to
// propagate the fact that the array length must be positive.
InitializeNode* init = initialization();
if (init != NULL) {
if (init != nullptr) {
length = new CastIINode(length, narrow_length_type);
length->set_req(TypeFunc::Control, init->proj_out_or_null(TypeFunc::Control));
}
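
The function above hands back the (possibly casted) length, or null when narrowing would require a new node and allow_new_nodes forbids it. The same contract expressed on plain integer ranges (all names here are illustrative):

#include <algorithm>

struct Range { int lo, hi; };

// Returns the value's own range, a narrowed copy written to 'out', or nullptr
// when narrowing would require a new node but allow_new_nodes forbids it.
const Range* narrowed(const Range& value, const Range& limit,
                      bool allow_new_nodes, Range& out) {
  Range n{std::max(value.lo, limit.lo), std::min(value.hi, limit.hi)};
  if (n.lo == value.lo && n.hi == value.hi) return &value; // nothing gained
  if (!allow_new_nodes) return nullptr; // caller must cope without a cast
  out = n;
  return &out;
}
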
@ -1749,13 +1749,13 @@ uint LockNode::size_of() const { return sizeof(*this); }
// - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
if (ctrl == NULL)
return NULL;
if (ctrl == nullptr)
return nullptr;
while (1) {
if (ctrl->is_Region()) {
RegionNode *r = ctrl->as_Region();
Node *n = r->is_copy();
if (n == NULL)
if (n == nullptr)
break; // hit a region, return it
else
ctrl = n;
@ -1778,10 +1778,10 @@ static Node *next_control(Node *ctrl) {
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
GrowableArray<AbstractLockNode*> &lock_ops) {
ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr;
if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) {
Node *n = ctrl_proj->in(0);
if (n != NULL && n->is_Unlock()) {
if (n != nullptr && n->is_Unlock()) {
UnlockNode *unlock = n->as_Unlock();
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
@ -1801,11 +1801,11 @@ bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
// Find the lock matching an unlock. Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
LockNode *lock_result = NULL;
LockNode *lock_result = nullptr;
// find the matching lock, or an intervening safepoint
Node *ctrl = next_control(unlock->in(0));
while (1) {
assert(ctrl != NULL, "invalid control graph");
assert(ctrl != nullptr, "invalid control graph");
assert(!ctrl->is_Start(), "missing lock for unlock");
if (ctrl->is_top()) break; // dead control path
if (ctrl->is_Proj()) ctrl = ctrl->in(0);
@ -1813,7 +1813,7 @@ LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
break; // found a safepoint (may be the lock we are searching for)
} else if (ctrl->is_Region()) {
// Check for a simple diamond pattern. Punt on anything more complicated
if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) {
Node *in1 = next_control(ctrl->in(1));
Node *in2 = next_control(ctrl->in(2));
if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
@ -1852,7 +1852,7 @@ bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* loc
if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
Node *lock_ctrl = next_control(if_node->in(0));
if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
Node* lock1_node = NULL;
Node* lock1_node = nullptr;
ProjNode* proj = if_node->as_If()->proj_out(!if_true);
if (if_true) {
if (proj->is_IfFalse() && proj->outcnt() == 1) {
@ -1863,7 +1863,7 @@ bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* loc
lock1_node = proj->unique_out();
}
}
if (lock1_node != NULL && lock1_node->is_Lock()) {
if (lock1_node != nullptr && lock1_node->is_Lock()) {
LockNode *lock1 = lock1_node->as_Lock();
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
@ -1888,7 +1888,7 @@ bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNod
// in(0) should be self edge so skip it.
for (int i = 1; i < (int)region->req(); i++) {
Node *in_node = next_control(region->in(i));
if (in_node != NULL) {
if (in_node != nullptr) {
if (find_matching_unlock(in_node, lock, lock_ops)) {
// found a match so keep on checking.
continue;
@ -1943,11 +1943,11 @@ void AbstractLockNode::dump_compact_spec(outputStream* st) const {
//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// perform any generic optimizations first (returns 'this' or NULL)
// perform any generic optimizations first (returns 'this' or null)
Node *result = SafePointNode::Ideal(phase, can_reshape);
if (result != NULL) return result;
if (result != nullptr) return result;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (in(0) && in(0)->is_top()) return nullptr;
// Now see if we can optimize away this lock. We don't actually
// remove the locking here, we simply set the _eliminate flag which
@ -1959,7 +1959,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// If we are locking a non-escaped object, the lock/unlock is unnecessary
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != NULL && cgr->not_global_escape(obj_node())) {
if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
assert(!is_eliminated() || is_coarsened(), "sanity");
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
@ -1978,7 +1978,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try lock coarsening
//
PhaseIterGVN* iter = phase->is_IterGVN();
if (iter != NULL && !is_eliminated()) {
if (iter != nullptr && !is_eliminated()) {
GrowableArray<AbstractLockNode*> lock_ops;
@ -2058,10 +2058,10 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
//=============================================================================
bool LockNode::is_nested_lock_region() {
return is_nested_lock_region(NULL);
return is_nested_lock_region(nullptr);
}
// c is used for access to compilation log; no logging if NULL
// c is used for access to compilation log; no logging if null
bool LockNode::is_nested_lock_region(Compile * c) {
BoxLockNode* box = box_node()->as_BoxLock();
int stk_slot = box->stack_slot();
@ -2074,8 +2074,8 @@ bool LockNode::is_nested_lock_region(Compile * c) {
// Ignore complex cases: merged locks or multiple locks.
Node* obj = obj_node();
LockNode* unique_lock = NULL;
Node* bad_lock = NULL;
LockNode* unique_lock = nullptr;
Node* bad_lock = nullptr;
if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
#ifdef ASSERT
this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
@ -2084,7 +2084,7 @@ bool LockNode::is_nested_lock_region(Compile * c) {
}
if (unique_lock != this) {
#ifdef ASSERT
this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != NULL ? unique_lock : bad_lock));
this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? unique_lock : bad_lock));
if (PrintEliminateLocks && Verbose) {
tty->print_cr("=============== unique_lock != this ============");
tty->print(" this: ");
@ -2093,11 +2093,11 @@ bool LockNode::is_nested_lock_region(Compile * c) {
box->dump();
tty->print(" obj: ");
obj->dump();
if (unique_lock != NULL) {
if (unique_lock != nullptr) {
tty->print(" unique_lock: ");
unique_lock->dump();
}
if (bad_lock != NULL) {
if (bad_lock != nullptr) {
tty->print(" bad_lock: ");
bad_lock->dump();
}
@ -2138,11 +2138,11 @@ uint UnlockNode::size_of() const { return sizeof(*this); }
//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// perform any generic optimizations first (returns 'this' or NULL)
// perform any generic optimizations first (returns 'this' or null)
Node *result = SafePointNode::Ideal(phase, can_reshape);
if (result != NULL) return result;
if (result != nullptr) return result;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (in(0) && in(0)->is_top()) return nullptr;
// Now see if we can optimize away this unlock. We don't actually
// remove the unlocking here, we simply set the _eliminate flag which
@ -2155,7 +2155,7 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// If we are unlocking a non-escaped object, the lock/unlock is unnecessary.
//
ConnectionGraph *cgr = phase->C->congraph();
if (cgr != NULL && cgr->not_global_escape(obj_node())) {
if (cgr != nullptr && cgr->not_global_escape(obj_node())) {
assert(!is_eliminated() || is_coarsened(), "sanity");
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
@ -2170,24 +2170,24 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
if (C == NULL) {
if (C == nullptr) {
return;
}
CompileLog* log = C->log();
if (log != NULL) {
if (log != nullptr) {
Node* box = box_node();
Node* obj = obj_node();
int box_id = box != NULL ? box->_idx : -1;
int obj_id = obj != NULL ? obj->_idx : -1;
int box_id = box != nullptr ? box->_idx : -1;
int obj_id = obj != nullptr ? obj->_idx : -1;
log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
tag, C->compile_id(), this->_idx,
is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
kind_as_string(), box_id, obj_id, (bad_lock != NULL ? bad_lock->_idx : -1));
kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1));
log->stamp();
log->end_head();
JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
while (p != NULL) {
while (p != nullptr) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
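The motivation for the NULL-to-nullptr conversion seen throughout these hunks is type safety: NULL is an integer constant that can silently match an integer overload, while nullptr has its own dedicated type. A minimal standalone sketch of the difference (plain C++, the pick function is made up for illustration, not HotSpot code):

    #include <cassert>
    #include <cstddef>

    static int pick(int)            { return 1; } // what an integer constant selects
    static int pick(void*)          { return 2; } // what a real pointer selects
    static int pick(std::nullptr_t) { return 3; } // exact match for nullptr

    int main() {
      int* p = nullptr;
      assert(pick(0) == 1);       // the literal 0 is an int first, a null pointer second
      assert(pick(p) == 2);       // a genuine pointer picks the pointer overload
      assert(pick(nullptr) == 3); // nullptr can never be mistaken for an integer
      return 0;
    }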


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -247,7 +247,7 @@ public:
int bci() const { return _bci; }
bool should_reexecute() const { return _reexecute==Reexecute_True; }
bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
bool has_method() const { return _method != NULL; }
bool has_method() const { return _method != nullptr; }
ciMethod* method() const { assert(has_method(), ""); return _method; }
JVMState* caller() const { return _caller; }
SafePointNode* map() const { return _map; }
@ -335,13 +335,13 @@ protected:
bool _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States
void set_jvms(JVMState* s) {
assert(s != nullptr, "assign NULL value to _jvms");
assert(s != nullptr, "assign null value to _jvms");
*(JVMState**)&_jvms = s; // override const attribute in the accessor
}
public:
SafePointNode(uint edges, JVMState* jvms,
// A plain safepoint advertises no memory effects (NULL):
const TypePtr* adr_type = NULL)
// A plain safepoint advertises no memory effects (null):
const TypePtr* adr_type = nullptr)
: MultiNode( edges ),
_jvms(jvms),
_adr_type(adr_type),
@ -353,7 +353,7 @@ public:
JVMState* jvms() const { return _jvms; }
virtual bool needs_deep_clone_jvms(Compile* C) { return false; }
void clone_jvms(Compile* C) {
if (jvms() != NULL) {
if (jvms() != nullptr) {
if (needs_deep_clone_jvms(C)) {
set_jvms(jvms()->clone_deep(C));
jvms()->set_map_deep(this);
@ -434,7 +434,7 @@ public:
}
// The parser marks useless maps as dead when it's done with them:
bool is_killed() { return in(TypeFunc::Control) == NULL; }
bool is_killed() { return in(TypeFunc::Control) == nullptr; }
// Exception states bubbling out of subgraphs such as inlined calls
// are recorded here. (There might be more than one, hence the "next".)
@ -442,7 +442,7 @@ public:
// for JVM states during parsing, intrinsic expansion, etc.
SafePointNode* next_exception() const;
void set_next_exception(SafePointNode* n);
bool has_exceptions() const { return next_exception() != NULL; }
bool has_exceptions() const { return next_exception() != nullptr; }
// Helper methods to operate on replaced nodes
ReplacedNodes replaced_nodes() const {
@ -531,7 +531,7 @@ public:
virtual uint match_edge(uint idx) const;
uint first_index(JVMState* jvms) const {
assert(jvms != NULL, "missed JVMS");
assert(jvms != nullptr, "missed JVMS");
return jvms->scloff() + _first_index;
}
uint n_fields() const { return _n_fields; }
@ -588,15 +588,15 @@ public:
address _entry_point; // Address of method being called
float _cnt; // Estimate of number of times called
CallGenerator* _generator; // corresponding CallGenerator for some late inline calls
const char* _name; // Printable name, if _method is NULL
const char* _name; // Printable name, if _method is null
CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
: SafePointNode(tf->domain()->cnt(), jvms, adr_type),
_tf(tf),
_entry_point(addr),
_cnt(COUNT_UNKNOWN),
_generator(NULL),
_name(NULL)
_generator(nullptr),
_name(nullptr)
{
init_class_id(Class_Call);
}
@ -634,7 +634,7 @@ public:
bool has_non_debug_use(Node* n);
// Returns the unique CheckCastPP of a call
// or the result projection if there are several CheckCastPPs,
// or returns NULL if there is none.
// or returns null if there is none.
Node* result_cast();
// Does this node return a pointer?
bool returns_pointer() const {
@ -720,13 +720,13 @@ public:
CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
: CallJavaNode(tf, addr, method) {
init_class_id(Class_CallStaticJava);
if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
}
CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
: CallJavaNode(tf, addr, NULL) {
: CallJavaNode(tf, addr, nullptr) {
init_class_id(Class_CallStaticJava);
// This node calls a runtime stub, which often has narrow memory effects.
_adr_type = adr_type;
@ -738,7 +738,7 @@ public:
static int extract_uncommon_trap_request(const Node* call);
bool is_boxing_method() const {
return is_macro() && (method() != NULL) && method()->is_boxing_method();
return is_macro() && (method() != nullptr) && method()->is_boxing_method();
}
// Late inlining modifies the JVMState, so we need to deep clone it
// when the call node is cloned (because it is macro node).
@ -933,7 +933,7 @@ public:
// Dig the klass operand out of a (possible) allocation site.
static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
AllocateNode* allo = Ideal_allocation(ptr, phase);
return (allo == NULL) ? NULL : allo->in(KlassNode);
return (allo == nullptr) ? nullptr : allo->in(KlassNode);
}
// Conservatively small estimate of offset of first non-header byte.
@ -954,13 +954,13 @@ public:
// Return true if the allocation doesn't escape the thread; its escape state
// needs to be noEscape or ArgEscape. InitializeNode._does_not_escape
// is true when its allocation's escape state is noEscape or
// ArgEscape. In case allocation's InitializeNode is NULL, check
// ArgEscape. In case allocation's InitializeNode is null, check
// AllocateNode._is_non_escaping flag.
// AllocateNode._is_non_escaping is true when its escape state is
// noEscape.
bool does_not_escape_thread() {
InitializeNode* init = NULL;
return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
InitializeNode* init = nullptr;
return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
}
// If object doesn't escape in <.init> method and there is memory barrier
@ -1003,8 +1003,8 @@ public:
// Return null if no allocation is recognized.
static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
AllocateNode* allo = Ideal_allocation(ptr, phase);
return (allo == NULL || !allo->is_AllocateArray())
? NULL : allo->as_AllocateArray();
return (allo == nullptr || !allo->is_AllocateArray())
? nullptr : allo->as_AllocateArray();
}
};
@ -1041,11 +1041,11 @@ protected:
public:
AbstractLockNode(const TypeFunc *tf)
: CallNode(tf, NULL, TypeRawPtr::BOTTOM),
: CallNode(tf, nullptr, TypeRawPtr::BOTTOM),
_kind(Regular)
{
#ifndef PRODUCT
_counter = NULL;
_counter = nullptr;
#endif
}
virtual int Opcode() const = 0;
@ -1064,7 +1064,7 @@ public:
bool is_nested() const { return (_kind == Nested); }
const char * kind_as_string() const;
void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;
void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const;
void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
@ -1138,7 +1138,7 @@ public:
virtual uint size_of() const; // Size is bigger
UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
, _dbg_jvms(NULL)
, _dbg_jvms(nullptr)
#endif
{
init_class_id(Class_Unlock);
@ -1154,7 +1154,7 @@ public:
}
JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
JVMState* dbg_jvms() const { return NULL; }
JVMState* dbg_jvms() const { return nullptr; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP


@ -37,7 +37,7 @@
// If input is already higher or equal to cast type, then this is an identity.
Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
Node* dom = dominating_cast(phase, phase);
if (dom != NULL) {
if (dom != nullptr) {
return dom;
}
if (_dependency != RegularDependency) {
@ -97,7 +97,7 @@ const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
}
bool ConstraintCastNode::cmp(const Node &n) const {
@ -144,7 +144,7 @@ Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node *n, const Type *t,
default:
fatal("Bad opcode %d", opcode);
}
return NULL;
return nullptr;
}
Node* ConstraintCastNode::make(Node* c, Node *n, const Type *t, DependencyType dependency, BasicType bt) {
@ -158,34 +158,34 @@ Node* ConstraintCastNode::make(Node* c, Node *n, const Type *t, DependencyType d
default:
fatal("Bad basic type %s", type2name(bt));
}
return NULL;
return nullptr;
}
TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
if (_dependency == UnconditionalDependency) {
return NULL;
return nullptr;
}
Node* val = in(1);
Node* ctl = in(0);
int opc = Opcode();
if (ctl == NULL) {
return NULL;
if (ctl == nullptr) {
return nullptr;
}
// Range check CastIIs may all end up under a single range check and
// in that case only the narrower CastII would be kept by the code
// below which would be incorrect.
if (is_CastII() && as_CastII()->has_range_check()) {
return NULL;
return nullptr;
}
if (type()->isa_rawptr() && (gvn->type_or_null(val) == NULL || gvn->type(val)->isa_oopptr())) {
return NULL;
if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
return nullptr;
}
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
Node* u = val->fast_out(i);
if (u != this &&
u->outcnt() > 0 &&
u->Opcode() == opc &&
u->in(0) != NULL &&
u->in(0) != nullptr &&
u->bottom_type()->higher_equal(type())) {
if (pt->is_dominator(u->in(0), ctl)) {
return u->as_Type();
@ -199,7 +199,7 @@ TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt)
}
}
}
return NULL;
return nullptr;
}
#ifndef PRODUCT
@ -245,7 +245,7 @@ const Type* CastIINode::Value(PhaseGVN* phase) const {
// CastIINode
//
if (carry_dependency()) {
if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
if (in(0) != nullptr && in(0)->in(0) != nullptr && in(0)->in(0)->is_If()) {
assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
Node* proj = in(0);
if (proj->in(0)->in(1)->is_Bool()) {
@ -307,7 +307,7 @@ const Type* CastIINode::Value(PhaseGVN* phase) const {
static Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, Node* control, const TypeInteger* type, ConstraintCastNode::DependencyType dependency, BasicType bt) {
Node* n = ConstraintCastNode::make(control, parent, type, dependency, bt);
Node* existing = igvn->hash_find_insert(n);
if (existing != NULL) {
if (existing != nullptr) {
n->destruct(igvn);
return existing;
}
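
find_or_make_integer_cast above is the usual hash-consing step: build a candidate node, probe the GVN table with hash_find_insert, and destruct the candidate when an equivalent node already exists. A rough sketch of the same idea with standard containers (Node and ValueTable here are hypothetical stand-ins, not the HotSpot classes):

    #include <cassert>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-in: a node's identity is (opcode, operand id).
    struct Node {
      std::string op;
      int operand;
    };

    class ValueTable {
      std::unordered_map<std::string, std::unique_ptr<Node>> _table;
     public:
      // Return the canonical node for (op, operand), creating it on first use.
      Node* find_or_make(const std::string& op, int operand) {
        std::string key = op + "#" + std::to_string(operand);
        auto it = _table.find(key);
        if (it != _table.end()) {
          return it->second.get(); // an equivalent node exists: reuse it
        }
        auto made = std::make_unique<Node>(Node{op, operand});
        Node* raw = made.get();
        _table.emplace(std::move(key), std::move(made));
        return raw;
      }
    };

    int main() {
      ValueTable vt;
      Node* a = vt.find_or_make("CastII", 42);
      Node* b = vt.find_or_make("CastII", 42);
      assert(a == b); // structurally equal casts collapse to one node
      return 0;
    }
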
@ -316,7 +316,7 @@ static Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, Node* c
Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
if (progress != NULL) {
if (progress != nullptr) {
return progress;
}
if (can_reshape && !_range_check_dependency && !phase->C->post_loop_opts_phase()) {
@ -326,7 +326,7 @@ Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (!_range_check_dependency) {
return optimize_integer_cast(phase, T_INT);
}
return NULL;
return nullptr;
}
Node* CastIINode::Identity(PhaseGVN* phase) {
@ -373,7 +373,7 @@ const Type* CastLLNode::Value(PhaseGVN* phase) const {
Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
if (progress != NULL) {
if (progress != nullptr) {
return progress;
}
if (!phase->C->post_loop_opts_phase()) {
@ -383,7 +383,7 @@ Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
// the ConvI2L.
Node* in1 = in(1);
if (in1 != NULL && in1->Opcode() == Op_ConvI2L) {
if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
const Type* t = Value(phase);
const Type* t_in = phase->type(in1);
if (t != Type::TOP && t_in != Type::TOP) {
@ -418,7 +418,7 @@ const Type* CheckCastPPNode::Value(PhaseGVN* phase) const {
const TypePtr *in_type = inn->isa_ptr();
const TypePtr *my_type = _type->isa_ptr();
const Type *result = _type;
if (in_type != NULL && my_type != NULL) {
if (in_type != nullptr && my_type != nullptr) {
TypePtr::PTR in_ptr = in_type->ptr();
if (in_ptr == TypePtr::Null) {
result = in_type;
@ -492,7 +492,7 @@ Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
break;
}
return NULL;
return nullptr;
}
//------------------------------Identity---------------------------------------
@ -514,7 +514,7 @@ const Type* CastP2XNode::Value(PhaseGVN* phase) const {
}
Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
}
//------------------------------Identity---------------------------------------
@ -524,7 +524,7 @@ Node* CastP2XNode::Identity(PhaseGVN* phase) {
}
Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency) {
Node* cast= NULL;
Node* cast= nullptr;
if (type->isa_int()) {
cast = make_cast(Op_CastII, c, in, type, dependency);
} else if (type->isa_long()) {
@ -545,15 +545,15 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
PhaseIterGVN *igvn = phase->is_IterGVN();
const TypeInteger* this_type = this->type()->is_integer(bt);
Node* z = in(1);
const TypeInteger* rx = NULL;
const TypeInteger* ry = NULL;
const TypeInteger* rx = nullptr;
const TypeInteger* ry = nullptr;
// Similar to ConvI2LNode::Ideal() for the same reasons
if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) {
if (igvn == NULL) {
if (igvn == nullptr) {
// Postpone this optimization to iterative GVN, where we can handle deep
// AddI chains without an exponential number of recursive Ideal() calls.
phase->record_for_igvn(this);
return NULL;
return nullptr;
}
int op = z->Opcode();
Node* x = z->in(1);
@ -567,9 +567,9 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
assert(op == Op_Sub(bt), "");
return SubNode::make(cx, cy, bt);
}
return NULL;
return nullptr;
}
return NULL;
return nullptr;
}
const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
@ -578,7 +578,7 @@ const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* re
}
const TypeInteger* this_type = res->is_integer(bt);
const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
if (in_type != NULL &&
if (in_type != nullptr &&
(in_type->lo_as_long() != this_type->lo_as_long() ||
in_type->hi_as_long() != this_type->hi_as_long())) {
jlong lo1 = this_type->lo_as_long();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -190,7 +190,7 @@ class CheckCastPPNode: public ConstraintCastNode {
// convert a machine-pointer-sized integer to a raw pointer
class CastX2PNode : public Node {
public:
CastX2PNode( Node *n ) : Node(NULL, n) {}
CastX2PNode( Node *n ) : Node(nullptr, n) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

File diff suppressed because it is too large


@ -100,12 +100,12 @@ public:
Node* is_copy() const {
const Node* r = _in[Region];
if (r == NULL)
if (r == nullptr)
return nonnull_req();
return NULL; // not a copy!
return nullptr; // not a copy!
}
PhiNode* has_phi() const; // returns an arbitrary phi user, or NULL
PhiNode* has_unique_phi() const; // returns the unique phi user, or NULL
PhiNode* has_phi() const; // returns an arbitrary phi user, or null
PhiNode* has_unique_phi() const; // returns the unique phi user, or null
// Is this region node unreachable from root?
bool is_unreachable_region(const PhaseGVN* phase);
#ifdef ASSERT
@ -182,7 +182,7 @@ public:
Input // Input values are [1..len)
};
PhiNode( Node *r, const Type *t, const TypePtr* at = NULL,
PhiNode( Node *r, const Type *t, const TypePtr* at = nullptr,
const int imid = -1,
const int iid = TypeOopPtr::InstanceTop,
const int iidx = Compile::AliasIdxTop,
@ -201,7 +201,7 @@ public:
// create a new phi with in edges matching r and set (initially) to x
static PhiNode* make( Node* r, Node* x );
// extra type arguments override the new phi's bottom_type and adr_type
static PhiNode* make( Node* r, Node* x, const Type *t, const TypePtr* at = NULL );
static PhiNode* make( Node* r, Node* x, const Type *t, const TypePtr* at = nullptr );
// create a new phi with narrowed memory type
PhiNode* slice_memory(const TypePtr* adr_type) const;
PhiNode* split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const;
@ -214,11 +214,11 @@ public:
bool is_tripcount(BasicType bt) const;
// Determine a unique non-trivial input, if any.
// Ignore casts if it helps. Return NULL on failure.
// Ignore casts if it helps. Return null on failure.
Node* unique_input(PhaseTransform *phase, bool uncast);
Node* unique_input(PhaseTransform *phase) {
Node* uin = unique_input(phase, false);
if (uin == NULL) {
if (uin == nullptr) {
uin = unique_input(phase, true);
}
return uin;
@ -426,7 +426,7 @@ public:
// Takes the type of val and filters it through the test represented
// by if_proj and returns a more refined type if one is produced.
// Returns NULL if it couldn't improve the type.
// Returns null if it couldn't improve the type.
static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj);
#ifndef PRODUCT


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ void LRG::dump() const {
if( is_multidef() ) {
tty->print("MultiDef ");
if (_defs != NULL) {
if (_defs != nullptr) {
tty->print("(");
for (int i = 0; i < _defs->length(); i++) {
tty->print("N%d ", _defs->at(i)->_idx);
@ -200,7 +200,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool sc
#ifndef PRODUCT
print_chaitin_statistics
#else
NULL
nullptr
#endif
)
, _live(0)
@ -234,7 +234,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool sc
cutoff *= 0.001;
buckval[i] = cutoff;
for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
buckets[i][j] = NULL;
buckets[i][j] = nullptr;
}
}
// Sort blocks into buckets
@ -379,7 +379,7 @@ void PhaseChaitin::Register_Allocate() {
{
Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
_live = NULL; // Mark live as being not available
_live = nullptr; // Mark live as being not available
rm.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
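
The recurring sequence above (clear _live, rm.reset_to_mark(), IndexSet::reset_memory) rolls the whole working arena back to a saved mark instead of freeing liveness data object by object. A toy sketch of that mark/reset discipline (the Arena class below is a hypothetical simplification, assuming all allocations stay within the reserved capacity, not HotSpot's Arena):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    class Arena {
      std::vector<std::byte> _buf;
     public:
      explicit Arena(std::size_t cap) { _buf.reserve(cap); }
      void* alloc(std::size_t n) {            // bump-style allocation
        std::size_t old = _buf.size();
        _buf.resize(old + n);
        return _buf.data() + old;
      }
      std::size_t mark() const { return _buf.size(); }
      void reset_to_mark(std::size_t m) { _buf.resize(m); } // reclaim in one step
    };

    int main() {
      Arena a(1024);
      std::size_t m = a.mark();
      a.alloc(100);          // scratch liveness data
      a.reset_to_mark(m);    // all of it reclaimed at once
      assert(a.mark() == m);
      return 0;
    }
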
@ -397,7 +397,7 @@ void PhaseChaitin::Register_Allocate() {
if (stretch_base_pointer_live_ranges(&live_arena)) {
Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]);
// Since some live range stretched, I need to recompute live
_live = NULL;
_live = nullptr;
rm.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
ifg.init(_lrg_map.max_lrg_id());
@ -436,7 +436,7 @@ void PhaseChaitin::Register_Allocate() {
// To color, we need the IFG and for that we need LIVE.
{
Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
_live = NULL;
_live = nullptr;
rm.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
ifg.init(_lrg_map.max_lrg_id());
@ -474,7 +474,7 @@ void PhaseChaitin::Register_Allocate() {
{
Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
_live = NULL;
_live = nullptr;
rm.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
@ -544,7 +544,7 @@ void PhaseChaitin::Register_Allocate() {
// Nuke the live-ness and interference graph and LiveRanGe info
{
Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
_live = NULL;
_live = nullptr;
rm.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
ifg.init(_lrg_map.max_lrg_id());
@ -622,7 +622,7 @@ void PhaseChaitin::Register_Allocate() {
// Log regalloc results
CompileLog* log = Compile::current()->log();
if (log != NULL) {
if (log != nullptr) {
log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
}
@ -682,9 +682,9 @@ void PhaseChaitin::Register_Allocate() {
}
// Done!
_live = NULL;
_ifg = NULL;
C->set_indexSet_arena(NULL); // ResourceArea is at end of scope
_live = nullptr;
_ifg = nullptr;
C->set_indexSet_arena(nullptr); // ResourceArea is at end of scope
}
void PhaseChaitin::de_ssa() {
@ -791,10 +791,10 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
copy_src._has_copy = 1;
}
if (trace_spilling() && lrg._def != NULL) {
if (trace_spilling() && lrg._def != nullptr) {
// collect defs for MultiDef printing
if (lrg._defs == NULL) {
lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
if (lrg._defs == nullptr) {
lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, nullptr);
lrg._defs->append(lrg._def);
}
lrg._defs->append(n);
@ -802,7 +802,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
#endif
// Check for a single def LRG; these can spill nicely
// via rematerialization. Flag as NULL for no def found
// via rematerialization. Flag as null for no def found
// yet, or 'n' for single def or -1 for many defs.
lrg._def = lrg._def ? NodeSentinel : n;
@ -844,7 +844,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
lrg.set_scalable_reg_slots(Matcher::scalable_predicate_reg_slots());
}
}
assert(n_type->isa_vect() == NULL || lrg._is_vector ||
assert(n_type->isa_vect() == nullptr || lrg._is_vector ||
ireg == Op_RegD || ireg == Op_RegL || ireg == Op_RegVectMask,
"vector must be in vector registers");
@ -1063,7 +1063,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
const RegMask &lrgmask = lrg.mask();
uint kreg = n->in(k)->ideal_reg();
bool is_vect = RegMask::is_vector(kreg);
assert(n->in(k)->bottom_type()->isa_vect() == NULL || is_vect ||
assert(n->in(k)->bottom_type()->isa_vect() == nullptr || is_vect ||
kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask,
"vector must be in vector registers");
if (lrgmask.is_bound(kreg))
@ -1092,7 +1092,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// if the LRG is an unaligned pair, we will have to spill
// so clear the LRG's register mask if it is not already spilled
if (!is_vect && !n->is_SpillCopy() &&
(lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
(lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
lrgmask.is_misaligned_pair()) {
lrg.Clear();
}
@ -1777,22 +1777,22 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
// See if this happens to be a base.
// NOTE: we use TypePtr instead of TypeOopPtr because we can have
// pointers derived from NULL! These are always along paths that
// pointers derived from null! These are always along paths that
// can't happen at run-time but the optimizer cannot deduce it so
// we have to handle it gracefully.
assert(!derived->bottom_type()->isa_narrowoop() ||
derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
const TypePtr *tj = derived->bottom_type()->isa_ptr();
// If it's an OOP with a non-zero offset, then it is derived.
if( tj == NULL || tj->_offset == 0 ) {
if( tj == nullptr || tj->_offset == 0 ) {
derived_base_map[derived->_idx] = derived;
return derived;
}
// Derived is NULL+offset? Base is NULL!
// Derived is null+offset? Base is null!
if( derived->is_Con() ) {
Node *base = _matcher.mach_null();
assert(base != NULL, "sanity");
if (base->in(0) == NULL) {
assert(base != nullptr, "sanity");
if (base->in(0) == nullptr) {
// Initialize it once and make it shared:
// set control to _root and place it into Start block
// (where top() node is placed).
@ -1817,7 +1817,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
if (_lrg_map.live_range_id(base) == 0) {
new_lrg(base, maxlrg++);
}
assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base null should be shared");
derived_base_map[derived->_idx] = base;
return base;
}
@ -1866,7 +1866,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
uint j;
for( j = 1; j < base->req(); j++ )
if( phi->in(j) != base->in(j) &&
!(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
!(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different nulls
break;
if( j == base->req() ) { // All inputs match?
base = phi; // Then use existing 'phi' and drop 'base'
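
The point of tracking (derived, base) pairs in this code is that an interior pointer is only meaningful relative to its base: if a moving collector relocates the object, the derived pointer must be rebuilt from the recorded base. A toy illustration of the arithmetic (plain C++ char buffers, no HotSpot types):

    #include <cassert>
    #include <cstddef>

    int main() {
      char heap[64] = {};
      char* base    = heap;       // start of the "object"
      char* derived = base + 16;  // interior pointer at offset 16

      std::ptrdiff_t offset = derived - base; // what the (derived, base) pair preserves

      char new_heap[64] = {};
      char* new_base    = new_heap;           // the "object" after relocation
      char* new_derived = new_base + offset;  // derived pointer rebuilt from the base

      assert(new_derived - new_base == 16);   // the interior offset survives the move
      return 0;
    }
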
@ -2428,7 +2428,7 @@ void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
if (n->is_MachSafePoint()) {
MachSafePointNode* sfpt = n->as_MachSafePoint();
JVMState* jvms = sfpt->jvms();
if (jvms != NULL) {
if (jvms != nullptr) {
// Now scan for a live derived pointer
if (jvms->oopoff() < sfpt->req()) {
// Check each derived/base pair
@ -2452,11 +2452,11 @@ void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const {
}
} else if (check->is_Con()) {
if (is_derived && check->bottom_type()->is_ptr()->_offset != 0) {
// Derived is NULL+non-zero offset, base must be NULL.
// Derived is null+non-zero offset, base must be null.
assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer");
} else {
assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer");
// Base either ConP(NULL) or loadConP
// Base either ConP(nullptr) or loadConP
if (check->is_Mach()) {
assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer");
} else {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -217,7 +217,7 @@ public:
// Alive if non-zero, dead if zero
bool alive() const { return _def != NULL; }
bool alive() const { return _def != nullptr; }
bool is_multidef() const { return _def == NodeSentinel; }
bool is_singledef() const { return _def != NodeSentinel; }
@ -747,7 +747,7 @@ private:
Node* _def;
Node* _first_use;
public:
RegDefUse() : _def(NULL), _first_use(NULL) { }
RegDefUse() : _def(nullptr), _first_use(nullptr) { }
Node* def() const { return _def; }
Node* first_use() const { return _first_use; }
@ -758,8 +758,8 @@ private:
}
}
void clear() {
_def = NULL;
_first_use = NULL;
_def = nullptr;
_first_use = nullptr;
}
};
typedef GrowableArray<RegDefUse> RegToDefUseMap;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -260,7 +260,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Node *def = n->in(cidx);
if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
n->replace_by(def);
n->set_req(cidx,NULL);
n->set_req(cidx,nullptr);
b->remove_node(l);
l--;
continue;
@ -503,7 +503,7 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
lrgs(lr2).is_multidef() )
? NodeSentinel : src_def;
lrgs(lr2)._def = NULL; // No def for lrg 2
lrgs(lr2)._def = nullptr; // No def for lrg 2
lrgs(lr2).Clear(); // Force empty mask for LRG 2
//lrgs(lr2)._size = 0; // Live-range 2 goes dead
lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
@ -520,7 +520,7 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
// _phc.free_spillcopy(b->_nodes[bindex]);
assert( b->get_node(bindex) == dst_copy, "" );
dst_copy->replace_by( dst_copy->in(didx) );
dst_copy->set_req( didx, NULL);
dst_copy->set_req( didx, nullptr);
b->remove_node(bindex);
if( bindex < b->_ihrp_index ) b->_ihrp_index--;
if( bindex < b->_fhrp_index ) b->_fhrp_index--;

File diff suppressed because it is too large


@ -146,7 +146,7 @@ class CloneMap {
void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
Dict* dict() const { return _dict; }
void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); }
void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
@ -216,7 +216,7 @@ class Compile : public Phase {
AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM
};
// Variant of TraceTime(NULL, &_t_accumulator, CITime);
// Variant of TraceTime(nullptr, &_t_accumulator, CITime);
// Integrated with logging. If logging is turned on, and CITimeVerbose is true,
// then brackets are put into the log, with time stamps and node counts.
// (The time collection itself is always conditionalized on CITime.)
@ -265,7 +265,7 @@ class Compile : public Phase {
}
}
void set_element(const Type* e) {
assert(_element == NULL, "");
assert(_element == nullptr, "");
_element = e;
}
@ -291,9 +291,9 @@ class Compile : public Phase {
int _entry_bci; // entry bci for osr methods.
const TypeFunc* _tf; // My kind of signature
InlineTree* _ilt; // Ditto (temporary).
address _stub_function; // VM entry for stub being compiled, or NULL
const char* _stub_name; // Name of stub or adapter being compiled, or NULL
address _stub_entry_point; // Compile code entry for generated stub, or NULL
address _stub_function; // VM entry for stub being compiled, or null
const char* _stub_name; // Name of stub or adapter being compiled, or null
address _stub_entry_point; // Compile code entry for generated stub, or null
// Control of this compilation.
int _max_inline_size; // Max inline size for this compilation
@ -376,7 +376,7 @@ class Compile : public Phase {
debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
Arena _node_arena; // Arena for new-space Nodes
Arena _old_arena; // Arena for old-space Nodes, lifetime during xform
RootNode* _root; // Unique root of compilation, or NULL after bail-out.
RootNode* _root; // Unique root of compilation, or null after bail-out.
Node* _top; // Unique top node. (Reset by various phases.)
Node* _immutable_memory; // Initial memory state
@ -437,7 +437,7 @@ class Compile : public Phase {
public:
PrintInliningBuffer()
: _cg(NULL), _ss(default_stream_buffer_size) {}
: _cg(nullptr), _ss(default_stream_buffer_size) {}
stringStream* ss() { return &_ss; }
CallGenerator* cg() { return _cg; }
@ -484,7 +484,7 @@ class Compile : public Phase {
void print_inlining_assert_ready();
void print_inlining_reset();
void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = nullptr) {
stringStream ss;
CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
print_inlining_stream()->print("%s", ss.freeze());
@ -554,9 +554,9 @@ class Compile : public Phase {
ciMethod* method() const { return _method; }
int entry_bci() const { return _entry_bci; }
bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; }
void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; }
bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; }
void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; }
InlineTree* ilt() const { return _ilt; }
address stub_function() const { return _stub_function; }
const char* stub_name() const { return _stub_name; }
@ -633,7 +633,7 @@ class Compile : public Phase {
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(enum CompileCommand option) {
return method() != NULL && method()->has_option(option);
return method() != nullptr && method()->has_option(option);
}
#ifndef PRODUCT
@ -754,11 +754,11 @@ class Compile : public Phase {
Arena* comp_arena() { return &_comp_arena; }
ciEnv* env() const { return _env; }
CompileLog* log() const { return _log; }
bool failing() const { return _env->failing() || _failure_reason != NULL; }
bool failing() const { return _env->failing() || _failure_reason != nullptr; }
const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
bool failure_reason_is(const char* r) const {
return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
return (r == _failure_reason) || (r != nullptr && _failure_reason != nullptr && strcmp(r, _failure_reason) == 0);
}
void record_failure(const char* reason);
@ -820,7 +820,7 @@ class Compile : public Phase {
DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } )
MachConstantBaseNode* mach_constant_base_node();
bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
bool has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; }
// Generated by adlc, true if CallNode requires MachConstantBase.
bool needs_deep_clone_jvms();
@ -864,16 +864,16 @@ class Compile : public Phase {
void set_type_last_size(size_t sz) { _type_last_size = sz; }
const TypeFunc* last_tf(ciMethod* m) {
return (m == _last_tf_m) ? _last_tf : NULL;
return (m == _last_tf_m) ? _last_tf : nullptr;
}
void set_last_tf(ciMethod* m, const TypeFunc* tf) {
assert(m != NULL || tf == NULL, "");
assert(m != nullptr || tf == nullptr, "");
_last_tf_m = m;
_last_tf = tf;
}
AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr) { return find_alias_type(adr_type, false, field); }
bool have_alias_type(const TypePtr* adr_type);
AliasType* alias_type(ciField* field);
@ -889,7 +889,7 @@ class Compile : public Phase {
// Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = nullptr,
bool allow_intrinsics = true);
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return should_delay_string_inlining(call_method, jvms) ||
@ -919,7 +919,7 @@ class Compile : public Phase {
// PerMethodTrapLimit was exceeded for all inlined methods seen so far.
bool too_many_traps(Deoptimization::DeoptReason reason,
// Privately used parameter for logging:
ciMethodData* logmd = NULL);
ciMethodData* logmd = nullptr);
// Report if there were too many recompiles at a method and bci.
bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
// Report if there were too many traps or recompiles at a method and bci.
@ -1061,7 +1061,7 @@ class Compile : public Phase {
};
// Are we compiling a method?
bool has_method() { return method() != NULL; }
bool has_method() { return method() != nullptr; }
// Maybe print some information about this compile.
void print_compile_messages();
@ -1180,7 +1180,7 @@ class Compile : public Phase {
static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
// Optional control dependency (for example, on range check)
Node* ctrl = NULL);
Node* ctrl = nullptr);
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,6 +62,6 @@ ConNode *ConNode::make(const Type *t) {
// or else TypeOopPtr::NULL_PTR. Then set Type::_basic_type[AnyPtr] = T_ILLEGAL
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}


@ -70,15 +70,16 @@ public:
// Simple pointer constants
class ConPNode : public ConNode {
public:
ConPNode( const TypePtr *t ) : ConNode(t) {}
ConPNode(const TypePtr *t) : ConNode(t) {}
virtual int Opcode() const;
// Factory methods:
static ConPNode* make(address con) {
if (con == NULL)
return new ConPNode( TypePtr::NULL_PTR ) ;
else
return new ConPNode( TypeRawPtr::make(con) );
if (con == nullptr) {
return new ConPNode(TypePtr::NULL_PTR);
} else {
return new ConPNode(TypeRawPtr::make(con));
}
}
};


@ -148,7 +148,7 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
MacroAssembler _masm(&cb);
for (int i = 0; i < _constants.length(); i++) {
Constant con = _constants.at(i);
address constant_addr = NULL;
address constant_addr = nullptr;
if (con.is_array()) {
constant_addr = _masm.array_constant(con.type(), con.get_array(), con.alignment());
} else {
@ -176,17 +176,17 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
// filled in later in fill_jump_table.
address dummy = (address) n;
constant_addr = _masm.address_constant(dummy);
if (constant_addr == NULL) {
if (constant_addr == nullptr) {
return false;
}
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
"must be: %d == %d", (int)(constant_addr - _masm.code()->consts()->start()), (int)(con.offset()));
// Expand jump-table
address last_addr = NULL;
address last_addr = nullptr;
for (uint j = 1; j < n->outcnt(); j++) {
last_addr = _masm.address_constant(dummy + j);
if (last_addr == NULL) {
if (last_addr == nullptr) {
return false;
}
}
@ -211,7 +211,7 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
}
}
if (constant_addr == NULL) {
if (constant_addr == nullptr) {
return false;
}
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),

View File

@ -49,7 +49,7 @@ const Type* Conv2BNode::Value(PhaseGVN* phase) const {
if( t == TypeInt::ZERO ) return TypeInt::ZERO;
if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO;
const TypePtr *tp = t->isa_ptr();
if( tp != NULL ) {
if (tp != nullptr) {
if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP;
if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE;
if (tp->ptr() == TypePtr::NotNull) return TypeInt::ONE;
@ -85,7 +85,7 @@ Node *ConvD2FNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
}
}
return NULL;
return nullptr;
}
//------------------------------Identity---------------------------------------
@ -112,7 +112,7 @@ Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
set_req(1, in(1)->in(1));
return this;
}
return NULL;
return nullptr;
}
//------------------------------Identity---------------------------------------
@ -148,7 +148,7 @@ Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
set_req(1, in(1)->in(1));
return this;
}
return NULL;
return nullptr;
}
//=============================================================================
@ -199,7 +199,7 @@ Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
set_req(1, in(1)->in(1));
return this;
}
return NULL;
return nullptr;
}
//=============================================================================
@ -228,7 +228,7 @@ Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
set_req(1, in(1)->in(1));
return this;
}
return NULL;
return nullptr;
}
//=============================================================================
@ -293,7 +293,7 @@ const Type* ConvI2LNode::Value(PhaseGVN* phase) const {
// Do NOT remove this node's type assertion until no more loop ops can happen.
if (phase->C->post_loop_opts_phase()) {
const TypeInt* in_type = phase->type(in(1))->isa_int();
if (in_type != NULL &&
if (in_type != nullptr &&
(in_type->_lo != this_type->_lo ||
in_type->_hi != this_type->_hi)) {
// Although this WORSENS the type, it increases GVN opportunities,
@ -571,7 +571,7 @@ static Node* find_or_make_convI2L(PhaseIterGVN* igvn, Node* parent,
const TypeLong* type) {
Node* n = new ConvI2LNode(parent, type);
Node* existing = igvn->hash_find_insert(n);
if (existing != NULL) {
if (existing != nullptr) {
n->destruct(igvn);
return existing;
}
@ -636,14 +636,14 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Addressing arithmetic will not absorb it as part of a 64-bit AddL.
PhaseIterGVN* igvn = phase->is_IterGVN();
Node* z = in(1);
const TypeInteger* rx = NULL;
const TypeInteger* ry = NULL;
const TypeInteger* rx = nullptr;
const TypeInteger* ry = nullptr;
if (Compile::push_thru_add(phase, z, this_type, rx, ry, T_INT, T_LONG)) {
if (igvn == NULL) {
if (igvn == nullptr) {
// Postpone this optimization to iterative GVN, where we can handle deep
// AddI chains without an exponential number of recursive Ideal() calls.
phase->record_for_igvn(this);
return NULL;
return nullptr;
}
int op = z->Opcode();
Node* x = z->in(1);
@ -659,7 +659,7 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
#endif //_LP64
return NULL;
return nullptr;
}
//=============================================================================
@ -724,13 +724,13 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( andl_op == Op_AddL ) {
// Don't do this for nodes which have more than one user since
// we'll end up computing the long add anyway.
if (andl->outcnt() > 1) return NULL;
if (andl->outcnt() > 1) return nullptr;
Node* x = andl->in(1);
Node* y = andl->in(2);
assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" );
if (phase->type(x) == Type::TOP) return NULL;
if (phase->type(y) == Type::TOP) return NULL;
if (phase->type(x) == Type::TOP) return nullptr;
if (phase->type(y) == Type::TOP) return nullptr;
Node *add1 = phase->transform(new ConvL2INode(x));
Node *add2 = phase->transform(new ConvL2INode(y));
return new AddINode(add1,add2);
@ -739,7 +739,7 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Disable optimization: LoadL->ConvL2I ==> LoadI.
// It causes problems (sizes of Load and Store nodes do not match)
// in objects initialization code and Escape Analysis.
return NULL;
return nullptr;
}
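The ConvL2I(AddL(x,y)) to AddI(ConvL2I(x), ConvL2I(y)) rewrite above relies on truncation commuting with two's-complement addition. A small self-check of that identity (the two helper functions are made up for illustration; unsigned arithmetic keeps the wraparound well defined in standard C++, and the narrowing casts behave modularly on mainstream compilers and as of C++20 by rule):

    #include <cassert>
    #include <cstdint>

    // (int32)(x + y) == (int32)x + (int32)y when all arithmetic is mod 2^32.
    static std::int32_t trunc_then_add(std::int64_t x, std::int64_t y) {
      std::uint32_t sum = static_cast<std::uint32_t>(x) + static_cast<std::uint32_t>(y);
      return static_cast<std::int32_t>(sum);
    }

    static std::int32_t add_then_trunc(std::int64_t x, std::int64_t y) {
      return static_cast<std::int32_t>(static_cast<std::uint64_t>(x) +
                                       static_cast<std::uint64_t>(y));
    }

    int main() {
      std::int64_t x = 0x1234567890ABCDEFLL;
      std::int64_t y = -987654321987654321LL;
      assert(add_then_trunc(x, y) == trunc_then_add(x, y));
      assert(add_then_trunc(-1, 2) == 1);
      return 0;
    }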


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -89,7 +89,7 @@ static bool magic_int_divide_constants(jint d, jint &M, jint &s) {
//--------------------------transform_int_divide-------------------------------
// Convert a division by constant divisor into an alternate Ideal graph.
// Return NULL if no transformation occurs.
// Return null if no transformation occurs.
static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor ) {
// Check for invalid divisors
@ -101,7 +101,7 @@ static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor
const int N = 32;
// Result
Node *q = NULL;
Node *q = nullptr;
if (d == 1) {
// division by +/- 1
@ -334,7 +334,7 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
//--------------------------transform_long_divide------------------------------
// Convert a division by constant divisor into an alternate Ideal graph.
// Return NULL if no transformation occurs.
// Return null if no transformation occurs.
static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divisor ) {
// Check for invalid divisors
assert( divisor != 0L && divisor != min_jlong,
@ -345,7 +345,7 @@ static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divis
const int N = 64;
// Result
Node *q = NULL;
Node *q = nullptr;
if (d == 1) {
// division by +/- 1
@ -460,29 +460,29 @@ Node* DivINode::Identity(PhaseGVN* phase) {
Node *DivINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
const Type *t = phase->type( in(2) );
if( t == TypeInt::ONE ) // Identity?
return NULL; // Skip it
if( t == TypeInt::ONE ) // Identity?
return nullptr; // Skip it
const TypeInt *ti = t->isa_int();
if( !ti ) return NULL;
if( !ti ) return nullptr;
// Check for useless control input
// Check for excluding div-zero case
if (in(0) && (ti->_hi < 0 || ti->_lo > 0)) {
set_req(0, NULL); // Yank control input
set_req(0, nullptr); // Yank control input
return this;
}
if( !ti->is_con() ) return NULL;
if( !ti->is_con() ) return nullptr;
jint i = ti->get_con(); // Get divisor
if (i == 0) return NULL; // Dividing by zero constant does not idealize
if (i == 0) return nullptr; // Dividing by zero constant does not idealize
// Dividing by MININT does not optimize as a power-of-2 shift.
if( i == min_jint ) return NULL;
if( i == min_jint ) return nullptr;
return transform_int_divide( phase, in(1), i );
}
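
For the power-of-two case, transform_int_divide lowers the divide to an arithmetic shift plus a bias, because a bare shift rounds toward negative infinity while Java division truncates toward zero. A standalone sketch of that correction for divisor 8 (div_by_8 is a made-up name, and the code assumes the usual arithmetic right shift on signed values, which every mainstream compiler provides):

    #include <cassert>
    #include <cstdint>

    // x / 8 with round-toward-zero semantics, built from shifts only:
    // negative dividends are biased by (8 - 1) before the arithmetic shift.
    static std::int32_t div_by_8(std::int32_t x) {
      std::int32_t bias = (x >> 31) & 7; // 7 when x < 0, else 0
      return (x + bias) >> 3;
    }

    int main() {
      assert(div_by_8( 40) ==  5);
      assert(div_by_8( 39) ==  4);  // truncates toward zero
      assert(div_by_8(-39) == -4);  // a bare shift would give -5
      assert(div_by_8(-40) == -5);
      return 0;
    }
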
@ -566,29 +566,29 @@ Node* DivLNode::Identity(PhaseGVN* phase) {
Node *DivLNode::Ideal( PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
const Type *t = phase->type( in(2) );
if( t == TypeLong::ONE ) // Identity?
return NULL; // Skip it
return nullptr; // Skip it
const TypeLong *tl = t->isa_long();
if( !tl ) return NULL;
if( !tl ) return nullptr;
// Check for useless control input
// Check for excluding div-zero case
if (in(0) && (tl->_hi < 0 || tl->_lo > 0)) {
set_req(0, NULL); // Yank control input
set_req(0, nullptr); // Yank control input
return this;
}
if( !tl->is_con() ) return NULL;
if( !tl->is_con() ) return nullptr;
jlong l = tl->get_con(); // Get divisor
if (l == 0) return NULL; // Dividing by zero constant does not idealize
if (l == 0) return nullptr; // Dividing by zero constant does not idealize
// Dividing by MINLONG does not optimize as a power-of-2 shift.
if( l == min_jlong ) return NULL;
if( l == min_jlong ) return nullptr;
return transform_long_divide( phase, in(1), l );
}
@ -717,28 +717,28 @@ Node* DivFNode::Identity(PhaseGVN* phase) {
Node *DivFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
const Type *t2 = phase->type( in(2) );
if( t2 == TypeF::ONE ) // Identity?
return NULL; // Skip it
return nullptr; // Skip it
const TypeF *tf = t2->isa_float_constant();
if( !tf ) return NULL;
if( tf->base() != Type::FloatCon ) return NULL;
if( !tf ) return nullptr;
if( tf->base() != Type::FloatCon ) return nullptr;
// Check for out of range values
if( tf->is_nan() || !tf->is_finite() ) return NULL;
if( tf->is_nan() || !tf->is_finite() ) return nullptr;
// Get the value
float f = tf->getf();
int exp;
// Only for special case of dividing by a power of 2
if( frexp((double)f, &exp) != 0.5 ) return NULL;
if( frexp((double)f, &exp) != 0.5 ) return nullptr;
// Limit the range of acceptable exponents
if( exp < -126 || exp > 126 ) return NULL;
if( exp < -126 || exp > 126 ) return nullptr;
// Compute the reciprocal
float reciprocal = ((float)1.0) / f;
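
The exponent checks above restrict this transform to powers of two because only their reciprocals are exactly representable, which makes the multiply round identically to the divide for every finite input. A quick self-check of that equivalence (sample values chosen arbitrarily):

    #include <cassert>

    int main() {
      const float reciprocal = 1.0f / 8.0f; // exactly 0.125f, a power of two
      const float samples[] = {3.0f, 0.1f, -1234.5f, 1.0e-38f};
      for (float x : samples) {
        assert(x / 8.0f == x * reciprocal); // identical rounding for powers of two
      }
      return 0;
    }
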
@ -809,28 +809,28 @@ Node* DivDNode::Identity(PhaseGVN* phase) {
Node *DivDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
const Type *t2 = phase->type( in(2) );
if( t2 == TypeD::ONE ) // Identity?
return NULL; // Skip it
return nullptr; // Skip it
const TypeD *td = t2->isa_double_constant();
if( !td ) return NULL;
if( td->base() != Type::DoubleCon ) return NULL;
if( !td ) return nullptr;
if( td->base() != Type::DoubleCon ) return nullptr;
// Check for out of range values
if( td->is_nan() || !td->is_finite() ) return NULL;
if( td->is_nan() || !td->is_finite() ) return nullptr;
// Get the value
double d = td->getd();
int exp;
// Only for special case of dividing by a power of 2
if( frexp(d, &exp) != 0.5 ) return NULL;
if( frexp(d, &exp) != 0.5 ) return nullptr;
// Limit the range of acceptable exponents
if( exp < -1021 || exp > 1022 ) return NULL;
if( exp < -1021 || exp > 1022 ) return nullptr;
// Compute the reciprocal
double reciprocal = 1.0 / d;
@ -876,7 +876,7 @@ const Type* UDivINode::Value(PhaseGVN* phase) const {
Node *UDivINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
return NULL;
return nullptr;
}
@ -915,7 +915,7 @@ const Type* UDivLNode::Value(PhaseGVN* phase) const {
Node *UDivLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
return NULL;
return nullptr;
}
@ -925,22 +925,22 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
// Get the modulus
const Type *t = phase->type( in(2) );
if( t == Type::TOP ) return NULL;
if( t == Type::TOP ) return nullptr;
const TypeInt *ti = t->is_int();
// Check for useless control input
// Check for excluding mod-zero case
if (in(0) && (ti->_hi < 0 || ti->_lo > 0)) {
set_req(0, NULL); // Yank control input
set_req(0, nullptr); // Yank control input
return this;
}
// See if we are MOD'ing by 2^k or 2^k-1.
if( !ti->is_con() ) return NULL;
if( !ti->is_con() ) return nullptr;
jint con = ti->get_con();
Node *hook = new Node(1);
@ -993,7 +993,7 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// into a long multiply/int multiply/subtract case
// Cannot handle mod 0, and min_jint isn't handled by the transform
if( con == 0 || con == min_jint ) return NULL;
if( con == 0 || con == min_jint ) return nullptr;
// Get the absolute value of the constant; at this point, we can use this
jint pos_con = (con >= 0) ? con : -con;
@ -1020,11 +1020,11 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Divide using the transform from DivI to MulL
Node *result = transform_int_divide( phase, in(1), pos_con );
if (result != NULL) {
if (result != nullptr) {
Node *divide = phase->transform(result);
// Re-multiply, using a shift if this is a power of two
Node *mult = NULL;
Node *mult = nullptr;
if( log2_con >= 0 )
mult = phase->transform( new LShiftINode( divide, phase->intcon( log2_con ) ) );
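
The surrounding code rewrites x % d through the identity x - (x / d) * d, reusing the strength-reduced divide and a shift for the power-of-two re-multiply. A direct check of the identity (mod_via_div is a made-up helper; C++'s / and % already truncate toward zero like Java's):

    #include <cassert>
    #include <cstdint>

    // x % d rebuilt as x - (x / d) * d; the divide stands in for the
    // strength-reduced form, and for power-of-two d the multiply is a shift.
    static std::int32_t mod_via_div(std::int32_t x, std::int32_t d) {
      std::int32_t q = x / d;
      return x - q * d;
    }

    int main() {
      assert(mod_via_div( 39, 8) ==  39 % 8);
      assert(mod_via_div(-39, 8) == -39 % 8); // the sign follows the dividend
      assert(mod_via_div( 39, 7) ==  39 % 7);
      return 0;
    }
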
@ -1088,7 +1088,7 @@ const Type* ModINode::Value(PhaseGVN* phase) const {
Node *UModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
return NULL;
return nullptr;
}
//=============================================================================
@ -1097,22 +1097,22 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
if( in(0) && in(0)->is_top() ) return nullptr;
// Get the modulus
const Type *t = phase->type( in(2) );
if( t == Type::TOP ) return NULL;
if( t == Type::TOP ) return nullptr;
const TypeLong *tl = t->is_long();
// Check for useless control input
// Check for excluding mod-zero case
if (in(0) && (tl->_hi < 0 || tl->_lo > 0)) {
set_req(0, NULL); // Yank control input
set_req(0, nullptr); // Yank control input
return this;
}
// See if we are MOD'ing by 2^k or 2^k-1.
if( !tl->is_con() ) return NULL;
if( !tl->is_con() ) return nullptr;
jlong con = tl->get_con();
Node *hook = new Node(1);
@ -1167,7 +1167,7 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// into a long multiply/int multiply/subtract case
// Cannot handle mod 0, and min_jlong isn't handled by the transform
if( con == 0 || con == min_jlong ) return NULL;
if( con == 0 || con == min_jlong ) return nullptr;
// Get the absolute value of the constant; at this point, we can use this
jlong pos_con = (con >= 0) ? con : -con;
@ -1194,11 +1194,11 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Divide using the transform from DivL to MulL
Node *result = transform_long_divide( phase, in(1), pos_con );
if (result != NULL) {
if (result != nullptr) {
Node *divide = phase->transform(result);
// Re-multiply, using a shift if this is a power of two
Node *mult = NULL;
Node *mult = nullptr;
if( log2_con >= 0 )
mult = phase->transform( new LShiftLNode( divide, phase->intcon( log2_con ) ) );
@ -1306,7 +1306,7 @@ const Type* ModFNode::Value(PhaseGVN* phase) const {
Node *UModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
return NULL;
return nullptr;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,7 +196,7 @@ public:
};
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase) { return this; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { return NULL; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { return nullptr; }
virtual const Type* Value(PhaseGVN* phase) const { return bottom_type(); }
virtual uint hash() const { return Node::hash(); }
virtual bool is_CFG() const { return false; }
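
These bodies are the no-progress defaults of C2's per-node optimization hooks: Identity asks whether an existing equivalent node can replace this one, Ideal asks whether the subgraph can be reshaped (nullptr meaning no), and Value computes a type. A minimal model of that calling convention, with hypothetical types (not the real PhaseGVN loop):

struct Type {};
struct PhaseGVN;

struct NodeModel {
  virtual ~NodeModel() = default;
  virtual const Type* bottom_type() const { return nullptr; }
  // Is there an existing node equivalent to this one? 'this' means no.
  virtual NodeModel* Identity(PhaseGVN*) { return this; }
  // Reshape the subgraph rooted here; nullptr means no progress was made.
  virtual NodeModel* Ideal(PhaseGVN*, bool /*can_reshape*/) { return nullptr; }
  // Compute (and possibly narrow) this node's type.
  virtual const Type* Value(PhaseGVN*) const { return bottom_type(); }
};

// Caller-side protocol: nullptr from Ideal means "leave the node alone".
NodeModel* transform_once(PhaseGVN* gvn, NodeModel* n) {
  NodeModel* progress = n->Ideal(gvn, /*can_reshape=*/true);
  if (progress != nullptr) return progress;   // reshaped: use the new graph
  return n->Identity(gvn);                    // may return 'n' itself
}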


@ -71,7 +71,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
JVMState* jvms, bool allow_inline,
float prof_factor, ciKlass* speculative_receiver_type,
bool allow_intrinsics) {
assert(callee != NULL, "failed method resolution");
assert(callee != nullptr, "failed method resolution");
ciMethod* caller = jvms->method();
int bci = jvms->bci();
@ -103,7 +103,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
CompileLog* log = this->log();
if (log != NULL) {
if (log != nullptr) {
int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
@ -125,15 +125,15 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Special case the handling of certain common, profitable library
// methods. If these methods are replaced with specialized code,
// then we return it as the inlined version of the call.
CallGenerator* cg_intrinsic = NULL;
CallGenerator* cg_intrinsic = nullptr;
if (allow_inline && allow_intrinsics) {
CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
if (cg != NULL) {
if (cg != nullptr) {
if (cg->is_predicated()) {
// Code without intrinsic but, hopefully, inlined.
CallGenerator* inline_cg = this->call_generator(callee,
vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
if (inline_cg != NULL) {
if (inline_cg != nullptr) {
cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
}
}
@ -143,7 +143,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// We will retry the intrinsic if nothing has claimed it afterwards.
if (cg->does_virtual_dispatch()) {
cg_intrinsic = cg;
cg = NULL;
cg = nullptr;
} else if (IncrementalInline && should_delay_vector_inlining(callee, jvms)) {
return CallGenerator::for_late_inline(callee, cg);
} else {
@ -181,13 +181,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// sometimes has a broader type. A similar scenario is possible with
// default methods when the type system loses information about implemented
// interfaces.
if (cg != NULL && is_virtual_or_interface && !callee->is_static()) {
if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
}
if (cg != NULL) {
if (cg != nullptr) {
// Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations
// first.
@ -210,10 +210,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
// The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
ciMethod* receiver_method = NULL;
ciMethod* receiver_method = nullptr;
int morphism = profile.morphism();
if (speculative_receiver_type != NULL) {
if (speculative_receiver_type != nullptr) {
if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
// We have a speculative type, we should be able to resolve
// the call. We do that before looking at the profiling at
@ -221,18 +221,18 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// a speculative type should help us avoid.
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
speculative_receiver_type);
if (receiver_method == NULL) {
speculative_receiver_type = NULL;
if (receiver_method == nullptr) {
speculative_receiver_type = nullptr;
} else {
morphism = 1;
}
} else {
// speculation failed before. Use profiling at the call
// (could allow bimorphic inlining for instance).
speculative_receiver_type = NULL;
speculative_receiver_type = nullptr;
}
}
if (receiver_method == NULL &&
if (receiver_method == nullptr &&
(have_major_receiver || morphism == 1 ||
(morphism == 2 && UseBimorphicInlining))) {
// receiver_method = profile.method();
@ -240,33 +240,33 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
profile.receiver(0));
}
if (receiver_method != NULL) {
if (receiver_method != nullptr) {
// The single majority receiver sufficiently outweighs the minority.
CallGenerator* hit_cg = this->call_generator(receiver_method,
vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
if (hit_cg != NULL) {
if (hit_cg != nullptr) {
// Look up second receiver.
CallGenerator* next_hit_cg = NULL;
ciMethod* next_receiver_method = NULL;
CallGenerator* next_hit_cg = nullptr;
ciMethod* next_receiver_method = nullptr;
if (morphism == 2 && UseBimorphicInlining) {
next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
profile.receiver(1));
if (next_receiver_method != NULL) {
if (next_receiver_method != nullptr) {
next_hit_cg = this->call_generator(next_receiver_method,
vtable_index, !call_does_dispatch, jvms,
allow_inline, prof_factor);
if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
if (next_hit_cg != nullptr && !next_hit_cg->is_inline() &&
have_major_receiver && UseOnlyInlinedBimorphic) {
// Skip if we can't inline second receiver's method
next_hit_cg = NULL;
next_hit_cg = nullptr;
}
}
}
CallGenerator* miss_cg;
Deoptimization::DeoptReason reason = (morphism == 2
? Deoptimization::Reason_bimorphic
: Deoptimization::reason_class_check(speculative_receiver_type != NULL));
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
: Deoptimization::reason_class_check(speculative_receiver_type != nullptr));
if ((morphism == 1 || (morphism == 2 && next_hit_cg != nullptr)) &&
!too_many_traps_or_recompiles(caller, bci, reason)
) {
// Generate uncommon trap for class check failure path
@ -279,20 +279,20 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
: CallGenerator::for_virtual_call(callee, vtable_index));
}
if (miss_cg != NULL) {
if (next_hit_cg != NULL) {
assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
if (miss_cg != nullptr) {
if (next_hit_cg != nullptr) {
assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation");
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
// We don't need to record dependency on a receiver here and below.
// Whenever we inline, the dependency is added by Parse::Parse().
miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
}
if (miss_cg != NULL) {
ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
if (miss_cg != nullptr) {
ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0);
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count);
float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
float hit_prob = speculative_receiver_type != nullptr ? 1.0 : profile.receiver_prob(0);
CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
if (cg != NULL) return cg;
if (cg != nullptr) return cg;
}
}
}
@ -318,13 +318,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
ciInstanceKlass* singleton = declared_interface->unique_implementor();
if (singleton != NULL) {
if (singleton != nullptr) {
assert(singleton != declared_interface, "not a unique implementor");
ciMethod* cha_monomorphic_target =
callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
if (cha_monomorphic_target != NULL &&
if (cha_monomorphic_target != nullptr &&
cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
ciKlass* holder = cha_monomorphic_target->holder();
@ -338,7 +338,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
ciKlass* constraint = (holder->is_subclass_of(singleton) ? holder : singleton); // avoid upcasts
CallGenerator* cg = CallGenerator::for_guarded_call(constraint, miss_cg, hit_cg);
if (hit_cg != NULL && cg != NULL) {
if (hit_cg != nullptr && cg != nullptr) {
dependencies()->assert_unique_implementor(declared_interface, singleton);
dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee);
return cg;
@ -349,7 +349,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Nothing claimed the intrinsic, so we go with straightforward inlining
// for the already discovered intrinsic.
if (allow_intrinsics && cg_intrinsic != NULL) {
if (allow_intrinsics && cg_intrinsic != nullptr) {
assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
return cg_intrinsic;
}
@ -373,7 +373,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
CallGenerator* cg = CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
// For optimized virtual calls, assert at runtime that the receiver object
// is a subtype of the method holder.
if (cg != NULL && is_virtual_or_interface && !callee->is_static()) {
if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
@ -419,7 +419,7 @@ bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms
if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
ciMethod* m = csj->method();
if (m != NULL &&
if (m != nullptr &&
(m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
// Delay String.<init>(new SB())
@ -509,12 +509,12 @@ void Parse::do_call() {
// Find target being called
bool will_link;
ciSignature* declared_signature = NULL;
ciSignature* declared_signature = nullptr;
ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode
ciInstanceKlass* holder_klass = orig_callee->holder();
ciKlass* holder = iter().get_declared_method_holder();
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
assert(declared_signature != NULL, "cannot be null");
assert(declared_signature != nullptr, "cannot be null");
JFR_ONLY(Jfr::on_resolution(this, holder, orig_callee);)
// Bump max node limit for JSR292 users
@ -535,7 +535,7 @@ void Parse::do_call() {
//assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw)
// Note: this takes into account invokeinterface of methods declared in java/lang/Object,
// which should be invokevirtuals but according to the VM spec may be invokeinterfaces
assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
assert(holder_klass->is_interface() || holder_klass->super() == nullptr || (bc() != Bytecodes::_invokeinterface), "must match bc");
// Note: In the absence of miranda methods, an abstract class K can perform
// an invokevirtual directly on an interface method I.m if K implements I.
@ -565,7 +565,7 @@ void Parse::do_call() {
bool call_does_dispatch = false;
// Speculative type of the receiver if any
ciKlass* speculative_receiver_type = NULL;
ciKlass* speculative_receiver_type = nullptr;
if (is_virtual_or_interface) {
Node* receiver_node = stack(sp() - nargs);
const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
@ -579,11 +579,11 @@ void Parse::do_call() {
callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
receiver_type, is_virtual,
call_does_dispatch, vtable_index); // out-parameters
speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
speculative_receiver_type = receiver_type != nullptr ? receiver_type->speculative_type() : nullptr;
}
// Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
ciKlass* receiver_constraint = NULL;
ciKlass* receiver_constraint = nullptr;
if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
ciInstanceKlass* calling_klass = method()->holder();
ciInstanceKlass* sender_klass = calling_klass;
@ -595,12 +595,12 @@ void Parse::do_call() {
receiver_constraint = holder;
}
if (receiver_constraint != NULL) {
if (receiver_constraint != nullptr) {
Node* receiver_node = stack(sp() - nargs);
Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint, Type::trust_interfaces));
Node* bad_type_ctrl = NULL;
Node* bad_type_ctrl = nullptr;
Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
if (bad_type_ctrl != NULL) {
if (bad_type_ctrl != nullptr) {
PreserveJVMState pjvms(this);
set_control(bad_type_ctrl);
uncommon_trap(Deoptimization::Reason_class_check,
@ -628,7 +628,7 @@ void Parse::do_call() {
CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
// NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
orig_callee = callee = NULL;
orig_callee = callee = nullptr;
// ---------------------
// Round double arguments before call
@ -650,17 +650,17 @@ void Parse::do_call() {
assert(jvms_in_sync(), "jvms must carry full info into CG");
// save across call, for a subsequent cast_not_null.
Node* receiver = has_receiver ? argument(0) : NULL;
Node* receiver = has_receiver ? argument(0) : nullptr;
// The extra CheckCastPPs for speculative types mess with PhaseStringOpts
if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
if (receiver != nullptr && !call_does_dispatch && !cg->is_string_late_inline()) {
// Feed profiling data for a single receiver to the type system so
// it can propagate it as a speculative type
receiver = record_profiled_receiver_for_speculation(receiver);
}
JVMState* new_jvms = cg->generate(jvms);
if (new_jvms == NULL) {
if (new_jvms == nullptr) {
// When an inlining attempt fails (e.g., too many arguments),
// it may contaminate the current compile state, making it
// impossible to pull back and try again. Once we call
@ -674,7 +674,7 @@ void Parse::do_call() {
// get a normal java call that may inline in that case
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
new_jvms = cg->generate(jvms);
if (new_jvms == NULL) {
if (new_jvms == nullptr) {
guarantee(failing(), "call failed to generate: calls should work");
return;
}
@ -700,7 +700,7 @@ void Parse::do_call() {
if (!stopped()) {
// This was some sort of virtual call, which did a null check for us.
// Now we can assert receiver-not-null, on the normal return path.
if (receiver != NULL && cg->is_virtual()) {
if (receiver != nullptr && cg->is_virtual()) {
Node* cast = cast_not_null(receiver);
// %%% assert(receiver == cast, "should already have cast the receiver");
}
@ -726,7 +726,7 @@ void Parse::do_call() {
if (ctype->is_loaded()) {
const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
Node* retnode = pop();
Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
push(cast_obj);
@ -759,7 +759,7 @@ void Parse::do_call() {
method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
cg->method()->print_name(); tty->cr();
}
if (C->log() != NULL) {
if (C->log() != nullptr) {
C->log()->elem("assert_null reason='return' klass='%d'",
C->log()->identify(rtype));
}
@ -791,7 +791,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
// Add a CatchNode.
GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, nullptr);
GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);
bool default_handler = false;
@ -901,7 +901,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
// Caller is responsible for saving away the map for normal control flow!
assert(stopped(), "call set_map(NULL) first");
assert(stopped(), "call set_map(nullptr) first");
assert(method()->has_exception_handlers(), "don't come here w/o work to do");
Node* ex_node = saved_ex_oop(ex_map);
@ -910,8 +910,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
return;
}
const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
if (ex_type == NULL)
NOT_PRODUCT(if (ex_type==nullptr) tty->print_cr("*** Exception not InstPtr"));
if (ex_type == nullptr)
ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
// determine potential exception handlers
@ -924,10 +924,10 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
ex_node = use_exception_state(ex_map);
// Get the exception oop klass from its header
Node* ex_klass_node = NULL;
Node* ex_klass_node = nullptr;
if (has_ex_handler() && !ex_type->klass_is_exact()) {
Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
// Compute the exception klass a little more cleverly.
// The obvious solution is to simply do a LoadKlass from the 'ex_node'.
@ -939,13 +939,13 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
ex_klass_node = new PhiNode(ex_node->in(0), TypeInstKlassPtr::OBJECT);
for (uint i = 1; i < ex_node->req(); i++) {
Node* ex_in = ex_node->in(i);
if (ex_in == top() || ex_in == NULL) {
if (ex_in == top() || ex_in == nullptr) {
// This path was not taken.
ex_klass_node->init_req(i, top());
continue;
}
Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
ex_klass_node->init_req( i, k );
}
ex_klass_node = _gvn.transform(ex_klass_node);
@ -1030,7 +1030,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
OptoRuntime::rethrow_Type(),
OptoRuntime::rethrow_stub(),
NULL, NULL,
nullptr, nullptr,
ex_node);
// Rethrow is a pure call, no side effects, only a result.
@ -1093,7 +1093,7 @@ ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klas
receiver_type, check_access);
// Has the call been sufficiently improved such that it is no longer a virtual call?
if (optimized_virtual_method != NULL) {
if (optimized_virtual_method != nullptr) {
callee = optimized_virtual_method;
call_does_dispatch = false;
} else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
@ -1117,8 +1117,8 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c
return callee;
}
if (receiver_type == NULL) {
return NULL; // no receiver type info
if (receiver_type == nullptr) {
return nullptr; // no receiver type info
}
// Attempt to improve the receiver
@ -1134,7 +1134,7 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c
// All other interesting cases are instance klasses.
if (!receiver_type->isa_instptr()) {
return NULL;
return nullptr;
}
ciInstanceKlass* receiver_klass = receiver_type->is_instptr()->instance_klass();
@ -1150,7 +1150,7 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c
ciInstanceKlass* calling_klass = caller->holder();
ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
if (cha_monomorphic_target != NULL) {
if (cha_monomorphic_target != nullptr) {
// Hardwiring a virtual.
assert(!callee->can_be_statically_bound(), "should have been handled earlier");
assert(!cha_monomorphic_target->is_abstract(), "");
@ -1171,10 +1171,10 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c
// In case of evolution, there is a dependence on every inlined method, since each
// such method can be changed when its class is redefined.
ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
if (exact_method != NULL) {
if (exact_method != nullptr) {
return exact_method;
}
}
return NULL;
return nullptr;
}
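
The hit/next-hit/miss generators assembled above lower to a chain of receiver-class guards. A standalone sketch of the control shape for the bimorphic (morphism == 2) case, with hypothetical types; the real generator emits IR, and the miss path traps back to the interpreter (Reason_bimorphic) instead of calling a function:

#include <cstdio>

// Hypothetical stand-ins for profiled receiver classes and call targets.
struct Klass { const char* name; };
struct Receiver { const Klass* klass; };

void inlined_K1_method(Receiver*) { std::puts("hit: inlined for K1"); }
void inlined_K2_method(Receiver*) { std::puts("next_hit: inlined for K2"); }
void deoptimize()                 { std::puts("miss: uncommon trap (Reason_bimorphic)"); }

// Shape of the guarded dispatch built by CallGenerator::for_predicted_call
// when both profiled receivers' methods could be inlined.
void bimorphic_call(Receiver* r, const Klass* k1, const Klass* k2) {
  if (r->klass == k1) {
    inlined_K1_method(r);   // hit_cg, probability ~ profile.receiver_prob(0)
  } else if (r->klass == k2) {
    inlined_K2_method(r);   // next_hit_cg
  } else {
    deoptimize();           // miss_cg: trap and recompile without this guess
  }
}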


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,12 +129,12 @@ void PhaseCFG::build_dominator_tree() {
Tarjan *w = &tarjan[i];
if( w->_dom != &tarjan[w->_semi] )
w->_dom = w->_dom->_dom;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later
}
// No immediate dominator for the root
Tarjan *w = &tarjan[get_root_block()->_pre_order];
w->_dom = NULL;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
w->_dom = nullptr;
w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later
// Convert the dominator tree array into my kind of graph
for(uint i = 1; i <= number_of_blocks(); i++){ // For all Tarjan vertices
@ -145,7 +145,7 @@ void PhaseCFG::build_dominator_tree() {
t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child
tdom->_dom_child = t; // Make me a child of my parent
} else
t->_block->_idom = NULL; // Root
t->_block->_idom = nullptr; // Root
}
w->setdepth(number_of_blocks() + 1); // Set depth in dominator tree
@ -175,12 +175,12 @@ class Block_Stack {
t->_block = b; // Save actual block
t->_semi = pre_order; // Block to DFS map
t->_label = t; // DFS to vertex map
t->_ancestor = NULL; // Fast LINK & EVAL setup
t->_ancestor = nullptr; // Fast LINK & EVAL setup
t->_child = &_tarjan[0]; // Sentinel
t->_size = 1;
t->_bucket = NULL;
t->_bucket = nullptr;
if (pre_order == 1)
t->_parent = NULL; // first block doesn't have parent
t->_parent = nullptr; // first block doesn't have parent
else {
// Save parent (current top block on stack) in DFS
t->_parent = &_tarjan[_stack_top->block->_pre_order];
@ -341,11 +341,11 @@ void Tarjan::setdepth( uint stack_size ) {
t->_block->_dom_depth = depth; // Set depth in dominator tree
Tarjan *dom_child = t->_dom_child;
t = t->_dom_next; // next tarjan
if (dom_child != NULL) {
if (dom_child != nullptr) {
*top = dom_child; // save child on stack
++top;
}
} while (t != NULL);
} while (t != nullptr);
} while (next < last);
} while (last < top);
}
@ -395,7 +395,7 @@ void PhaseIdealLoop::Dominators() {
// Initialize _control field for fast reference
int i;
for( i= C->unique()-1; i>=0; i-- )
ntarjan[i]._control = NULL;
ntarjan[i]._control = nullptr;
// Store the DFS order for the main loop
const uint fill_value = max_juint;
@ -413,12 +413,12 @@ void PhaseIdealLoop::Dominators() {
for( i = dfsnum-1; i>1; i-- ) { // For all nodes in reverse DFS order
NTarjan *w = &ntarjan[i]; // Get Node from DFS
assert(w->_control != NULL,"bad DFS walk");
assert(w->_control != nullptr,"bad DFS walk");
// Step 2:
Node *whead = w->_control;
for( uint j=0; j < whead->req(); j++ ) { // For each predecessor
if( whead->in(j) == NULL || !whead->in(j)->is_CFG() )
if( whead->in(j) == nullptr || !whead->in(j)->is_CFG() )
continue; // Only process control nodes
uint b = dfsorder[whead->in(j)->_idx];
if(b == fill_value) continue;
@ -468,28 +468,28 @@ void PhaseIdealLoop::Dominators() {
// Step 4:
for( i=2; i < dfsnum; i++ ) { // DFS order
NTarjan *w = &ntarjan[i];
assert(w->_control != NULL,"Bad DFS walk");
assert(w->_control != nullptr,"Bad DFS walk");
if( w->_dom != &ntarjan[w->_semi] )
w->_dom = w->_dom->_dom;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later
}
// No immediate dominator for the root
NTarjan *w = &ntarjan[dfsorder[C->root()->_idx]];
w->_dom = NULL;
w->_parent = NULL;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
w->_dom = nullptr;
w->_parent = nullptr;
w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later
// Convert the dominator tree array into my kind of graph
for( i=1; i<dfsnum; i++ ) { // For all Tarjan vertices
NTarjan *t = &ntarjan[i]; // Handy access
assert(t->_control != NULL,"Bad DFS walk");
assert(t->_control != nullptr,"Bad DFS walk");
NTarjan *tdom = t->_dom; // Handy access to immediate dominator
if( tdom ) { // Root has no immediate dominator
_idom[t->_control->_idx] = tdom->_control; // Set immediate dominator
t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child
tdom->_dom_child = t; // Make me a child of my parent
} else
_idom[C->root()->_idx] = NULL; // Root
_idom[C->root()->_idx] = nullptr; // Root
}
w->setdepth( C->unique()+1, _dom_depth ); // Set depth in dominator tree
// Pick up the 'top' node as well
@ -525,10 +525,10 @@ int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uin
dfsorder[b->_idx] = dfsnum; // Save DFS order info
w->_semi = dfsnum; // Node to DFS map
w->_label = w; // DFS to vertex map
w->_ancestor = NULL; // Fast LINK & EVAL setup
w->_ancestor = nullptr; // Fast LINK & EVAL setup
w->_child = &ntarjan[0]; // Sentinel
w->_size = 1;
w->_bucket = NULL;
w->_bucket = nullptr;
// Need DEF-USE info for this pass
for ( int i = b->outcnt(); i-- > 0; ) { // Put on stack backwards
@ -604,11 +604,11 @@ void NTarjan::setdepth( uint stack_size, uint *dom_depth ) {
dom_depth[t->_control->_idx] = depth; // Set depth in dominator tree
NTarjan *dom_child = t->_dom_child;
t = t->_dom_next; // next tarjan
if (dom_child != NULL) {
if (dom_child != nullptr) {
*top = dom_child; // save child on stack
++top;
}
} while (t != NULL);
} while (t != nullptr);
} while (next < last);
} while (last < top);
}
@ -628,13 +628,13 @@ void NTarjan::dump(int offset) const {
for(i = offset; i >0; i--) // Use indenting for tree structure
tty->print(" ");
tty->print("DFS Parent: ");
if(_parent != NULL)
if(_parent != nullptr)
_parent->_control->dump(); // Parent in DFS
tty->print("\n");
for(i = offset; i >0; i--) // Use indenting for tree structure
tty->print(" ");
tty->print("Dom Parent: ");
if(_dom != NULL)
if(_dom != nullptr)
_dom->_control->dump(); // Parent in Dominator Tree
tty->print("\n");
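
setdepth (shown twice above, for Tarjan and NTarjan) assigns dominator-tree depths without recursion by walking sibling chains and stacking first children. A standalone sketch of the same traversal over the first-child/next-sibling layout, with a hypothetical type; the real code pre-sizes a raw stack from the node count rather than using std::vector:

#include <utility>
#include <vector>

struct DomNode {
  DomNode* dom_child = nullptr;  // first child in the dominator tree
  DomNode* dom_next  = nullptr;  // next sibling under the same parent
  unsigned depth     = 0;
};

void set_depth(DomNode* root) {
  std::vector<std::pair<DomNode*, unsigned> > stack;
  stack.push_back(std::make_pair(root, 1u));
  while (!stack.empty()) {
    DomNode* t     = stack.back().first;
    unsigned depth = stack.back().second;
    stack.pop_back();
    for (; t != nullptr; t = t->dom_next) {  // whole sibling chain: same depth
      t->depth = depth;
      if (t->dom_child != nullptr)
        stack.push_back(std::make_pair(t->dom_child, depth + 1));
    }
  }
}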

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -295,7 +295,7 @@ public:
inline PointsToIterator(const PointsToNode* n, int cnt) : node(n), cnt(cnt), i(0) { }
inline bool has_next() const { return i < cnt; }
inline void next() { i++; }
PointsToNode* get() const { ShouldNotCallThis(); return NULL; }
PointsToNode* get() const { ShouldNotCallThis(); return nullptr; }
};
class EdgeIterator: public PointsToIterator {
@ -432,7 +432,7 @@ private:
// Set the escape state of an object and its fields.
void set_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc
NOT_PRODUCT(COMMA const char* reason)) {
// Don't change non-escaping state of NULL pointer.
// Don't change non-escaping state of null pointer.
if (ptn != null_obj) {
if (ptn->escape_state() < esc) {
NOT_PRODUCT(trace_es_update_helper(ptn, esc, false, reason));
@ -446,7 +446,7 @@ private:
}
void set_fields_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc
NOT_PRODUCT(COMMA const char* reason)) {
// Don't change non-escaping state of NULL pointer.
// Don't change non-escaping state of null pointer.
if (ptn != null_obj) {
if (ptn->fields_escape_state() < esc) {
NOT_PRODUCT(trace_es_update_helper(ptn, esc, true, reason));
@ -472,7 +472,7 @@ private:
// Optimize objects compare.
const TypeInt* optimize_ptr_compare(Node* n);
// Returns unique corresponding java object or NULL.
// Returns unique corresponding java object or null.
JavaObjectNode* unique_java_object(Node *n);
// Add an edge of the specified type pointing to the specified target.
@ -510,7 +510,7 @@ private:
if (is_new) { // New edge?
assert(!_verify, "graph is incomplete");
if (to == null_obj) {
return is_new; // Don't add fields to NULL pointer.
return is_new; // Don't add fields to null pointer.
}
if (to->is_JavaObject()) {
is_new = to->add_edge(from);
@ -564,7 +564,7 @@ private:
PhiNode* get_map_phi(int idx) {
Node* phi = _node_map[idx];
return (phi == NULL) ? NULL : phi->as_Phi();
return (phi == nullptr) ? nullptr : phi->as_Phi();
}
// Returns true if there is an object in the scope of sfn that does not escape globally.
@ -617,21 +617,21 @@ public:
void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
Unique_Node_List *delayed_worklist) {
PointsToNode* ptn = ptnode_adr(to->_idx);
if (delayed_worklist != NULL) { // First iteration of CG construction
if (delayed_worklist != nullptr) { // First iteration of CG construction
add_local_var(n, es);
if (ptn == NULL) {
if (ptn == nullptr) {
delayed_worklist->push(n);
return; // Process it later.
}
} else {
assert(ptn != NULL, "node should be registered");
assert(ptn != nullptr, "node should be registered");
}
add_edge(ptnode_adr(n->_idx), ptn);
}
// Map ideal node to existing PointsTo node (usually phantom_object).
void map_ideal_node(Node *n, PointsToNode* ptn) {
assert(ptn != NULL, "only existing PointsTo node");
assert(ptn != nullptr, "only existing PointsTo node");
_nodes.at_put(n->_idx, ptn);
}
@ -649,8 +649,8 @@ public:
};
inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
_edges(CG->_compile->comp_arena(), 2, 0, NULL),
_uses (CG->_compile->comp_arena(), 2, 0, NULL),
_edges(CG->_compile->comp_arena(), 2, 0, nullptr),
_uses (CG->_compile->comp_arena(), 2, 0, nullptr),
_type((u1)type),
_flags(ScalarReplaceable),
_escape((u1)es),
@ -658,12 +658,12 @@ inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es,
_node(n),
_idx(n->_idx),
_pidx(CG->next_pidx()) {
assert(n != NULL && es != UnknownEscape, "sanity");
assert(n != nullptr && es != UnknownEscape, "sanity");
}
inline FieldNode::FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
PointsToNode(CG, n, es, Field),
_bases(CG->_compile->comp_arena(), 2, 0, NULL),
_bases(CG->_compile->comp_arena(), 2, 0, nullptr),
_offset(offs), _is_oop(is_oop),
_has_unknown_base(false) {
}
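
The two guarded updates above (set_escape_state, set_fields_escape_state) implement a monotone lattice: a node's escape state may only be raised, never lowered, which is what lets the connection-graph fixed-point iteration terminate. A standalone sketch of that invariant, with the enum ordering assumed from the comparisons above:

// Hypothetical mirror of the PointsToNode escape lattice.
enum EscapeState { UnknownEscape = 0, NoEscape, ArgEscape, GlobalEscape };

struct PTNode {
  EscapeState escape  = NoEscape;
  bool is_null_obj    = false;   // the null pointer keeps its state
};

void raise_escape_state(PTNode* ptn, EscapeState es) {
  if (ptn->is_null_obj) return;            // don't touch the null object
  if (ptn->escape < es) ptn->escape = es;  // raise only, never lower
}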


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
if (use->is_Proj()) {
Block* buse = get_block_for_node(use);
if (buse != b) { // In wrong block?
if (buse != NULL) {
if (buse != nullptr) {
buse->find_remove(use); // Remove from wrong block
}
map_node_to_block(use, b);
@ -77,9 +77,9 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
const Node *in0 = n->in(0);
assert(in0 != NULL, "Only control-dependent");
assert(in0 != nullptr, "Only control-dependent");
const Node *p = in0->is_block_proj();
if (p != NULL && p != n) { // Control from a block projection?
if (p != nullptr && p != n) { // Control from a block projection?
assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
// Find trailing Region
Block *pb = get_block_for_node(in0); // Block-projection already has basic block
@ -109,7 +109,7 @@ bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
}
Block* d = find_block_for_node(dom_node);
Block* n = find_block_for_node(node);
assert(n != NULL && d != NULL, "blocks must exist");
assert(n != nullptr && d != nullptr, "blocks must exist");
if (d == n) {
if (dom_node->is_block_start()) {
@ -212,15 +212,15 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
// removed in final_graph_reshaping), fix the control of the
// node to cover the precedence edges and remove the
// dependencies.
Node* n = NULL;
Node* n = nullptr;
for (uint i = node->len()-1; i >= node->req(); i--) {
Node* m = node->in(i);
if (m == NULL) continue;
if (m == nullptr) continue;
// Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
if (is_CFG(m)) {
node->rm_prec(i);
if (n == NULL) {
if (n == nullptr) {
n = m;
} else {
assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
@ -231,7 +231,7 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
}
}
if (n != NULL) {
if (n != nullptr) {
assert(node->in(0), "control should have been set");
assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
if (!is_dominator(n, node->in(0))) {
@ -239,9 +239,9 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
}
}
// process all inputs that are non NULL
// process all inputs that are non null
for (int i = node->req()-1; i >= 0; --i) {
if (node->in(i) != NULL) {
if (node->in(i) != nullptr) {
spstack.push(node->in(i));
}
}
@ -254,10 +254,10 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
if (b1 == NULL) return;
if (b1 == nullptr) return;
assert(b1->_dom_depth < b2->_dom_depth, "sanity");
Block* tmp = b2;
while (tmp != b1 && tmp != NULL) {
while (tmp != b1 && tmp != nullptr) {
tmp = tmp->_idom;
}
if (tmp != b1) {
@ -265,7 +265,7 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
tty->print_cr("!!! Unschedulable graph !!!");
for (uint j=0; j<n->len(); j++) { // For all inputs
Node* inn = n->in(j); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs
if (inn == nullptr) continue; // Ignore null, missing inputs
Block* inb = cfg->get_block_for_node(inn);
tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
@ -280,13 +280,13 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
// Find the last input dominated by all other inputs.
Block* deepb = NULL; // Deepest block so far
Block* deepb = nullptr; // Deepest block so far
int deepb_dom_depth = 0;
for (uint k = 0; k < n->len(); k++) { // For all inputs
Node* inn = n->in(k); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs
if (inn == nullptr) continue; // Ignore null, missing inputs
Block* inb = cfg->get_block_for_node(inn);
assert(inb != NULL, "must already have scheduled this input");
assert(inb != nullptr, "must already have scheduled this input");
if (deepb_dom_depth < (int) inb->_dom_depth) {
// The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom
@ -296,7 +296,7 @@ static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
deepb_dom_depth = deepb->_dom_depth;
}
}
assert(deepb != NULL, "must be at least one input to n");
assert(deepb != nullptr, "must be at least one input to n");
return deepb;
}
@ -325,7 +325,7 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
// to root and nodes that use is_block_proj() nodes should be attached
// to the region that starts their block.
const Node* control_input = parent_node->in(0);
if (control_input != NULL) {
if (control_input != nullptr) {
replace_block_proj_ctrl(parent_node);
} else {
// Is a constant with NO inputs?
@ -345,7 +345,7 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
while (input_index < parent_node->len()) {
Node* in = parent_node->in(input_index++);
if (in == NULL) {
if (in == nullptr) {
continue;
}
@ -401,10 +401,10 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// As a convenient boundary condition, return 'this' if LCA is null.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
if (LCA == NULL || LCA == this) return this;
if (LCA == nullptr || LCA == this) return this;
Block* anc = this;
while (anc->_dom_depth > LCA->_dom_depth)
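
The loop above raises the deeper of the two blocks first; the remainder of dom_lca (elided here) then climbs both blocks in lockstep until they meet. A standalone sketch of the full depth-based LCA walk, with a hypothetical minimal block type:

struct Blk {
  Blk*     idom  = nullptr;  // immediate dominator; null at the root
  unsigned depth = 0;        // _dom_depth: distance from the root
};

Blk* dom_lca(Blk* a, Blk* b) {
  if (b == nullptr || b == a) return a;       // boundary case, as above
  while (a->depth > b->depth) a = a->idom;    // raise the deeper node first
  while (b->depth > a->depth) b = b->idom;
  while (a != b) { a = a->idom; b = b->idom; }  // then climb together
  return a;
}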
@ -428,7 +428,7 @@ Block* Block::dom_lca(Block* LCA) {
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
Block* buse = cfg->get_block_for_node(use);
if (buse == NULL) return LCA; // Unused killing Projs have no use block
if (buse == nullptr) return LCA; // Unused killing Projs have no use block
if (!use->is_Phi()) return buse->dom_lca(LCA);
uint pmax = use->req(); // Number of Phi inputs
// Why doesn't this loop just break after finding the matching input to
@ -507,9 +507,9 @@ static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg)
Node* mem_inputs[4];
int mem_inputs_length = 0;
if (base != NULL) mem_inputs[mem_inputs_length++] = base;
if (index != NULL) mem_inputs[mem_inputs_length++] = index;
if (store != NULL) mem_inputs[mem_inputs_length++] = store;
if (base != nullptr) mem_inputs[mem_inputs_length++] = base;
if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
if (store != nullptr) mem_inputs[mem_inputs_length++] = store;
// In the comparison below, add one to account for the control input,
// which may be null, but always takes up a spot in the in array.
@ -519,9 +519,9 @@ static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg)
// from the early block of only the address portion of the instruction,
// and ignore other blocks that may have factored into the wider
// schedule_early calculation.
if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);
Block* deepb = NULL; // Deepest block so far
Block* deepb = nullptr; // Deepest block so far
int deepb_dom_depth = 0;
for (int i = 0; i < mem_inputs_length; i++) {
Block* inb = cfg->get_block_for_node(mem_inputs[i]);
@ -554,9 +554,9 @@ bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
Node* end = store_block->end();
if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
Node* if_true = end->find_out_with(Op_IfTrue);
assert(if_true != NULL, "null check without null projection");
assert(if_true != nullptr, "null check without null projection");
Node* null_block_region = if_true->find_out_with(Op_Region);
assert(null_block_region != NULL, "null check without null region");
assert(null_block_region != nullptr, "null check without null region");
return get_block_for_node(null_block_region) == load_block;
}
return false;
@ -580,7 +580,7 @@ bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
assert(load->needs_anti_dependence_check(), "must be a load of some sort");
assert(LCA != NULL, "");
assert(LCA != nullptr, "");
DEBUG_ONLY(Block* LCA_orig = LCA);
// Compute the alias index. Loads and stores with different alias indices
@ -650,7 +650,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
Node* initial_mem = load->in(MemNode::Memory);
worklist_store.push(initial_mem);
worklist_visited.push(initial_mem);
worklist_mem.push(NULL);
worklist_mem.push(nullptr);
while (worklist_store.size() > 0) {
// Examine a nearby store to see if it might interfere with our load.
Node* mem = worklist_mem.pop();
@ -665,7 +665,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
) {
mem = store; // It's not a possibly interfering store.
if (store == initial_mem)
initial_mem = NULL; // only process initial memory once
initial_mem = nullptr; // only process initial memory once
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
store = mem->fast_out(i);
@ -708,7 +708,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
MachSafePointNode* ms = (MachSafePointNode*) mstore;
assert(ms->is_MachCallJava(), "");
MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
if (mcj->_method == NULL) {
if (mcj->_method == nullptr) {
// These runtime calls do not write to Java visible memory
// (other than Raw) and so do not require anti-dependence edges.
continue;
@ -737,7 +737,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// earliest legal block for 'load'. In the latter case,
// immediately insert an anti-dependence edge.
Block* store_block = get_block_for_node(store);
assert(store_block != NULL, "unused killing projections skipped above");
assert(store_block != nullptr, "unused killing projections skipped above");
if (store->is_Phi()) {
// Loop-phis need to raise load before input. (Other phis are treated
@ -887,9 +887,9 @@ Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited,
// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {
// If the _stack is empty, then just return NULL: finished.
// If the _stack is empty, then just return null: finished.
if ( !_stack.size() )
return NULL;
return nullptr;
// I visit unvisited not-anti-dependence users first, then anti-dependent
// children next. I iterate backwards to support removal of nodes.
@ -911,7 +911,7 @@ Node *Node_Backward_Iterator::next() {
uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
// Schedule all nodes in a post-order visit
Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
Node *unvisited = nullptr; // Unvisited anti-dependent Node, if any
// Scan for unvisited nodes
while (idx > 0) {
@ -1180,7 +1180,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
// Do not hoist (to cover latency) instructions which target a
// single register. Hoisting stretches the live range of the
// single register and may force spilling.
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
in_latency = true;
@ -1206,10 +1206,10 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
while (LCA != early) {
LCA = LCA->_idom; // Follow up the dominator tree
if (LCA == NULL) {
if (LCA == nullptr) {
// Bailout without retry
assert(false, "graph should be schedulable");
C->record_method_not_compilable("late schedule failed: LCA == NULL");
C->record_method_not_compilable("late schedule failed: LCA is null");
return least;
}
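
This is hoist_to_cheaper_block's core walk, which the surrounding latency and register-pressure heuristics refine: follow _idom from the latest legal block (LCA) up to the earliest one, remembering the cheapest block seen. A standalone sketch with a hypothetical type:

struct BlockModel {
  BlockModel* idom = nullptr;   // immediate dominator
  double      freq = 1.0;       // estimated execution frequency
};

BlockModel* least_frequent_on_dom_path(BlockModel* lca, BlockModel* early) {
  BlockModel* least = lca;
  while (lca != early) {
    lca = lca->idom;                       // follow up the dominator tree
    if (lca == nullptr) return nullptr;    // unschedulable graph: bail out
    if (lca->freq < least->freq) least = lca;
  }
  return least;
}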
@ -1314,7 +1314,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
}
#endif
MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
if (mach) {
switch (mach->ideal_Opcode()) {
case Op_CreateEx:
@ -1326,7 +1326,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
// Don't move CheckCastPP nodes away from their input, if the input
// is a rawptr (5071820).
Node *def = self->in(1);
if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
early->add_inst(self);
#ifdef ASSERT
_raw_oops.push(def);
@ -1384,20 +1384,20 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
}
// Gather LCA of all uses
Block *LCA = NULL;
Block *LCA = nullptr;
{
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
// For all uses, find LCA
Node* use = self->fast_out(i);
LCA = raise_LCA_above_use(LCA, use, self, this);
}
guarantee(LCA != NULL, "There must be a LCA");
guarantee(LCA != nullptr, "There must be a LCA");
} // (Hide defs of imax, i from rest of block.)
// Place temps in the block of their use. This isn't a
// requirement for correctness but it reduces useless
// interference between temps and other nodes.
if (mach != NULL && mach->is_MachTemp()) {
if (mach != nullptr && mach->is_MachTemp()) {
map_node_to_block(self, LCA);
LCA->add_inst(self);
continue;
@ -1432,7 +1432,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
while (LCA->_loop->depth() > early->_loop->depth()) {
LCA = LCA->_idom;
}
assert(LCA != NULL, "a valid LCA must exist");
assert(LCA != nullptr, "a valid LCA must exist");
verify_memory_writer_placement(LCA, self);
}
@ -1445,10 +1445,10 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
// allocatable (hoisting can make a value live longer, leading to
// anti and output dependency problems which are normally resolved
// by the register allocator giving everyone a different register).
if (mach != NULL && must_clone[mach->ideal_Opcode()])
if (mach != nullptr && must_clone[mach->ideal_Opcode()])
try_to_hoist = false;
Block* late = NULL;
Block* late = nullptr;
if (try_to_hoist) {
// Now find the block with the least execution frequency.
// Start at the latest schedule and work up to the earliest schedule
@ -1528,8 +1528,8 @@ void PhaseCFG::global_code_motion() {
}
#endif
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// Detect implicit-null-check opportunities. Basically, find null checks
// with suitable memory ops nearby. Use the memory op to do the null check.
// I can generate a memory op if there is not one nearby.
if (C->is_method_compilation()) {
// By reversing the loop direction we get a very minor gain on mpegaudio.
@ -1549,7 +1549,7 @@ void PhaseCFG::global_code_motion() {
}
bool block_size_threshold_ok = false;
intptr_t *recalc_pressure_nodes = NULL;
intptr_t *recalc_pressure_nodes = nullptr;
if (OptoRegScheduling) {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
@ -1602,11 +1602,11 @@ void PhaseCFG::global_code_motion() {
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
C->record_method_not_compilable("local schedule failed");
}
_regalloc = NULL;
_regalloc = nullptr;
return;
}
}
_regalloc = NULL;
_regalloc = nullptr;
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
@ -1747,7 +1747,7 @@ CFGLoop* PhaseCFG::create_loop_tree() {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
// Check that _loop field are clear...we could clear them if not.
assert(block->_loop == NULL, "clear _loop expected");
assert(block->_loop == nullptr, "clear _loop expected");
// Sanity check that the RPO numbering is reflected in the _blocks array.
// It doesn't have to be for the loop tree to be built, but if it is not,
// then the blocks have been reordered since dom graph building...which
@ -1780,7 +1780,7 @@ CFGLoop* PhaseCFG::create_loop_tree() {
assert(worklist.size() == 0, "nonempty worklist");
CFGLoop* nloop = new CFGLoop(idct++);
assert(loop_head->_loop == NULL, "just checking");
assert(loop_head->_loop == nullptr, "just checking");
loop_head->_loop = nloop;
// Add to nloop so push_pred() will skip over inner loops
nloop->add_member(loop_head);
@ -1803,7 +1803,7 @@ CFGLoop* PhaseCFG::create_loop_tree() {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
CFGLoop* lp = block->_loop;
if (lp == NULL) {
if (lp == nullptr) {
// Not assigned to a loop. Add it to the method's pseudo loop.
block->_loop = root_loop;
lp = root_loop;
@ -1812,7 +1812,7 @@ CFGLoop* PhaseCFG::create_loop_tree() {
lp->add_member(block);
}
if (lp != root_loop) {
if (lp->parent() == NULL) {
if (lp->parent() == nullptr) {
// Not a nested loop. Make it a child of the method's pseudo loop.
root_loop->add_nested_loop(lp);
}
@ -1831,7 +1831,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg)
Node* pred_n = blk->pred(i);
Block* pred = cfg->get_block_for_node(pred_n);
CFGLoop *pred_loop = pred->_loop;
if (pred_loop == NULL) {
if (pred_loop == nullptr) {
// Filter out blocks for non-single-entry loops.
// For all reasonable loops, the head occurs before the tail in RPO.
if (pred->_rpo > head()->_rpo) {
@ -1840,11 +1840,11 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg)
}
} else if (pred_loop != this) {
// Nested loop.
while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
pred_loop = pred_loop->_parent;
}
// Make pred's loop be a child
if (pred_loop->_parent == NULL) {
if (pred_loop->_parent == nullptr) {
add_nested_loop(pred_loop);
// Continue with loop entry predecessor.
Block* pred_head = pred_loop->head();
@ -1852,7 +1852,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg)
assert(pred_head != head(), "loop head in only one loop");
push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
} else {
assert(pred_loop->_parent == this && _parent == NULL, "just checking");
assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
}
}
}
@ -1860,14 +1860,14 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg)
//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
assert(_parent == NULL, "no parent yet");
assert(_parent == nullptr, "no parent yet");
assert(cl != this, "not my own parent");
cl->_parent = this;
CFGLoop* ch = _child;
if (ch == NULL) {
if (ch == nullptr) {
_child = cl;
} else {
while (ch->_sibling != NULL) { ch = ch->_sibling; }
while (ch->_sibling != nullptr) { ch = ch->_sibling; }
ch->_sibling = cl;
}
}
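
add_nested_loop and the passes that follow it (compute_loop_depth, compute_freq, scale_freq) all work over the same first-child/next-sibling encoding of the loop tree: one pointer to the first nested loop, one to the next loop at the same nesting level. The two basic operations in isolation, with a hypothetical type:

struct LoopT {
  LoopT* parent  = nullptr;
  LoopT* child   = nullptr;   // first nested loop
  LoopT* sibling = nullptr;   // next loop at the same nesting level
  int    depth   = 0;
};

void add_nested(LoopT* self, LoopT* cl) {
  cl->parent = self;
  if (self->child == nullptr) {
    self->child = cl;                              // first child
  } else {
    LoopT* ch = self->child;
    while (ch->sibling != nullptr) ch = ch->sibling;
    ch->sibling = cl;                              // append to the chain
  }
}

void compute_depth(LoopT* lp, int depth) {
  lp->depth = depth;
  for (LoopT* ch = lp->child; ch != nullptr; ch = ch->sibling)
    compute_depth(ch, depth + 1);                  // children one level deeper
}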
@ -1878,7 +1878,7 @@ void CFGLoop::add_nested_loop(CFGLoop* cl) {
void CFGLoop::compute_loop_depth(int depth) {
_depth = depth;
CFGLoop* ch = _child;
while (ch != NULL) {
while (ch != nullptr) {
ch->compute_loop_depth(depth + 1);
ch = ch->_sibling;
}
@ -1897,7 +1897,7 @@ void CFGLoop::compute_freq() {
// Nested loops first
CFGLoop* ch = _child;
while (ch != NULL) {
while (ch != nullptr) {
ch->compute_freq();
ch = ch->_sibling;
}
@ -2227,7 +2227,7 @@ void CFGLoop::scale_freq() {
s->_freq = block_freq;
}
CFGLoop* ch = _child;
while (ch != NULL) {
while (ch != nullptr) {
ch->scale_freq();
ch = ch->_sibling;
}
@ -2235,7 +2235,7 @@ void CFGLoop::scale_freq() {
// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
if (_child != NULL) {
if (_child != nullptr) {
return _child->_freq;
}
return _freq;
@ -2245,8 +2245,8 @@ double CFGLoop::outer_loop_freq() const {
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
dump();
if (_child != NULL) _child->dump_tree();
if (_sibling != NULL) _sibling->dump_tree();
if (_child != nullptr) _child->dump_tree();
if (_sibling != nullptr) _sibling->dump_tree();
}
//------------------------------dump-------------------------------------------


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ void GraphKit::gen_stub(address C_function,
// Drop in the last_Java_sp. last_Java_fp is not touched.
// Always do this after the other "last_Java_frame" fields are set since
// as soon as last_Java_sp != NULL the has_last_Java_frame is true and
// as soon as last_Java_sp != nullptr the has_last_Java_frame is true and
// users will look at the other fields.
//
Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
@ -234,7 +234,7 @@ void GraphKit::gen_stub(address C_function,
// Runtime call returning oop in TLS? Fetch it out
if( pass_tls ) {
Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
// clear thread-local-storage(tls)
store_to_memory(control(), adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
@ -243,7 +243,7 @@ void GraphKit::gen_stub(address C_function,
//-----------------------------
// check exception
Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
Node* exit_memory = reset_memory();
@ -254,7 +254,7 @@ void GraphKit::gen_stub(address C_function,
Node* if_null = _gvn.transform( new IfFalseNode(iff) );
Node* if_not_null = _gvn.transform( new IfTrueNode(iff) );
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
Node *to_exc = new TailCallNode(if_not_null,
i_o(),
@ -267,7 +267,7 @@ void GraphKit::gen_stub(address C_function,
//-----------------------------
// If this is a normal subroutine return, issue the return and be done.
Node *ret = NULL;
Node *ret = nullptr;
switch( is_fancy_jump ) {
case 0: // Make a return instruction
// Return to caller, free any space for return address
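
The epilogue above materializes this control shape in IR (a CmpP against null, an IfNode, and a TailCallNode to the forwarding stub). The same logic as plain C++, with hypothetical stand-ins:

#include <cstdio>

struct ThreadModel { void* pending_exception = nullptr; };

void forward_exception_entry() { std::puts("jump to the exception forwarding stub"); }

void stub_epilogue(ThreadModel* thread) {
  if (thread->pending_exception != nullptr) {  // the IfTrueNode path above
    forward_exception_entry();                 // never returns in the real stub
    return;
  }
  // IfFalseNode path: fall through to the normal Return
}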

File diff suppressed because it is too large.


@ -72,7 +72,7 @@ class GraphKit : public Phase {
private:
SafePointNode* map_not_null() const {
assert(_map != NULL, "must call stopped() to test for reset compiler map");
assert(_map != nullptr, "must call stopped() to test for reset compiler map");
return _map;
}
@ -86,8 +86,8 @@ class GraphKit : public Phase {
}
#endif
virtual Parse* is_Parse() const { return NULL; }
virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
virtual Parse* is_Parse() const { return nullptr; }
virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }
ciEnv* env() const { return _env; }
PhaseGVN& gvn() const { return _gvn; }
@ -132,7 +132,7 @@ class GraphKit : public Phase {
// See layout accessors in class JVMState.
SafePointNode* map() const { return _map; }
bool has_exceptions() const { return _exceptions != NULL; }
bool has_exceptions() const { return _exceptions != nullptr; }
JVMState* jvms() const { return map_not_null()->_jvms; }
int sp() const { return _sp; }
int bci() const { return _bci; }
@ -143,7 +143,7 @@ class GraphKit : public Phase {
assert(jvms == this->jvms(), "sanity");
_sp = jvms->sp();
_bci = jvms->bci();
_method = jvms->has_method() ? jvms->method() : NULL; }
_method = jvms->has_method() ? jvms->method() : nullptr; }
void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
void clean_stack(int from_sp); // clear garbage beyond from_sp to top
@ -182,14 +182,14 @@ class GraphKit : public Phase {
// Tell if the compilation is failing.
bool failing() const { return C->failing(); }
// Set _map to NULL, signalling a stop to further bytecode execution.
// Set _map to null, signalling a stop to further bytecode execution.
// Preserve the map intact for future use, and return it back to the caller.
SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
SafePointNode* stop() { SafePointNode* m = map(); set_map(nullptr); return m; }
// Stop, but first smash the map's inputs to NULL, to mark it dead.
// Stop, but first smash the map's inputs to null, to mark it dead.
void stop_and_kill_map();
// Tell if _map is NULL, or control is top.
// Tell if _map is null, or control is top.
bool stopped();
// Tell if this method or any caller method has exception handlers.
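
A hedged illustration of the stop()/stopped() contract above (hypothetical caller code, not part of this patch):

    SafePointNode* saved = kit.stop(); // detaches the map; _map becomes nullptr
    assert(kit.stopped(), "no live path while the map is detached");
    kit.set_map(saved);                // resume building at the preserved state
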
@ -221,9 +221,9 @@ class GraphKit : public Phase {
// Detach and return an exception state.
SafePointNode* pop_exception_state() {
SafePointNode* ex_map = _exceptions;
if (ex_map != NULL) {
if (ex_map != nullptr) {
_exceptions = ex_map->next_exception();
ex_map->set_next_exception(NULL);
ex_map->set_next_exception(nullptr);
debug_only(verify_exception_state(ex_map));
}
return ex_map;
@ -246,10 +246,10 @@ class GraphKit : public Phase {
// Combine all exceptions of any sort whatever into a single master state.
SafePointNode* combine_and_pop_all_exception_states() {
if (_exceptions == NULL) return NULL;
if (_exceptions == nullptr) return nullptr;
SafePointNode* phi_map = pop_exception_state();
SafePointNode* ex_map;
while ((ex_map = pop_exception_state()) != NULL) {
while ((ex_map = pop_exception_state()) != nullptr) {
combine_exception_states(ex_map, phi_map);
}
return phi_map;
@ -353,16 +353,16 @@ class GraphKit : public Phase {
bool replace_length_in_map);
// Helper function to do a NULL pointer check or ZERO check based on type.
// Helper function to do a null pointer check or ZERO check based on type.
// Throw an exception if a given value is null.
// Return the value cast to not-null.
// Be clever about equivalent dominating null checks.
Node* null_check_common(Node* value, BasicType type,
bool assert_null = false,
Node* *null_control = NULL,
Node* *null_control = nullptr,
bool speculative = false);
Node* null_check(Node* value, BasicType type = T_OBJECT) {
return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
}
Node* null_check_receiver() {
assert(argument(0)->bottom_type()->isa_ptr(), "must be");
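
A minimal sketch of how the null_check_common entry point above is used (illustrative; 'kit' and 'obj' are assumed):

    Node* null_ctl = nullptr;
    Node* cast = kit.null_check_common(obj, T_OBJECT,
                                       /*assert_null=*/ false,
                                       &null_ctl);
    // 'cast' is obj typed not-null; 'null_ctl' carries the control path
    // taken when obj is null (top if that path is provably dead).
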
@ -381,7 +381,7 @@ class GraphKit : public Phase {
// Throw an uncommon trap if a given value is __not__ null.
// Return the value cast to null, and be clever about dominating checks.
Node* null_assert(Node* value, BasicType type = T_OBJECT) {
return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null());
}
// Check if value is null and abort if it is
@ -414,7 +414,7 @@ class GraphKit : public Phase {
profile.morphism() == 1) {
return profile.receiver(0);
}
return NULL;
return nullptr;
}
// record type from profiling with the type system
@ -479,7 +479,7 @@ class GraphKit : public Phase {
int n_size = type2size[n_type];
if (n_size == 1) return pop();
else if (n_size == 2) return pop_pair();
else return NULL;
else return nullptr;
}
Node* control() const { return map_not_null()->control(); }
@ -549,7 +549,7 @@ class GraphKit : public Phase {
bool require_atomic_access = false, bool unaligned = false,
bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other make_load factory");
assert(adr_type != nullptr, "use other make_load factory");
return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
mo, control_dependency, require_atomic_access,
unaligned, mismatched, unsafe, barrier_data);
@ -577,7 +577,7 @@ class GraphKit : public Phase {
bool unsafe = false,
int barrier_data = 0) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other store_to_memory factory");
assert(adr_type != nullptr, "use other store_to_memory factory");
return store_to_memory(ctl, adr, val, bt,
C->get_alias_index(adr_type),
mo, require_atomic_access,
@ -660,9 +660,9 @@ class GraphKit : public Phase {
// Return addressing for an array element.
Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
// Optional constraint on the array size:
const TypeInt* sizetype = NULL,
const TypeInt* sizetype = nullptr,
// Optional control dependency (for example, on range check)
Node* ctrl = NULL);
Node* ctrl = nullptr);
// Return a load of array element at idx.
Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);
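
A hedged usage sketch of array_element_address with the nullptr defaults above ('kit', 'ary' and 'idx' are assumed):

    // Address of ary[idx] for a T_INT array; no extra size constraint
    // and no explicit control dependency are supplied.
    Node* adr = kit.array_element_address(ary, idx, T_INT);
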
@ -717,12 +717,12 @@ class GraphKit : public Phase {
// Similar to set_edges_for_java_call, but simplified for runtime calls.
void set_predefined_output_for_runtime_call(Node* call) {
set_predefined_output_for_runtime_call(call, NULL, NULL);
set_predefined_output_for_runtime_call(call, nullptr, nullptr);
}
void set_predefined_output_for_runtime_call(Node* call,
Node* keep_mem,
const TypePtr* hook_mem);
Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);
Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr);
// Replace the call with the current state of the kit. Requires
// that the call was generated with separate io_projs so that
@ -738,13 +738,13 @@ class GraphKit : public Phase {
// The optional reason is debug information written to the compile log.
// Optional must_throw is the same as with add_safepoint_edges.
Node* uncommon_trap(int trap_request,
ciKlass* klass = NULL, const char* reason_string = NULL,
ciKlass* klass = nullptr, const char* reason_string = nullptr,
bool must_throw = false, bool keep_exact_action = false);
// Shorthand, to avoid saying "Deoptimization::" so many times.
Node* uncommon_trap(Deoptimization::DeoptReason reason,
Deoptimization::DeoptAction action,
ciKlass* klass = NULL, const char* reason_string = NULL,
ciKlass* klass = nullptr, const char* reason_string = nullptr,
bool must_throw = false, bool keep_exact_action = false) {
return uncommon_trap(Deoptimization::make_trap_request(reason, action),
klass, reason_string, must_throw, keep_exact_action);
@ -753,7 +753,7 @@ class GraphKit : public Phase {
// Bail out to the interpreter and keep exact action (avoid switching to Action_none).
Node* uncommon_trap_exact(Deoptimization::DeoptReason reason,
Deoptimization::DeoptAction action,
ciKlass* klass = NULL, const char* reason_string = NULL,
ciKlass* klass = nullptr, const char* reason_string = nullptr,
bool must_throw = false) {
return uncommon_trap(Deoptimization::make_trap_request(reason, action),
klass, reason_string, must_throw, /*keep_exact_action=*/true);
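
A hedged example of the trap shorthands above (the reason/action constants are standard Deoptimization enums; the call site is invented):

    // Deoptimize, rerun in the interpreter, and keep the exact action.
    kit.uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                            Deoptimization::Action_make_not_entrant);
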
@ -800,11 +800,11 @@ class GraphKit : public Phase {
Node* make_runtime_call(int flags,
const TypeFunc* call_type, address call_addr,
const char* call_name,
const TypePtr* adr_type, // NULL if no memory effects
Node* parm0 = NULL, Node* parm1 = NULL,
Node* parm2 = NULL, Node* parm3 = NULL,
Node* parm4 = NULL, Node* parm5 = NULL,
Node* parm6 = NULL, Node* parm7 = NULL);
const TypePtr* adr_type, // null if no memory effects
Node* parm0 = nullptr, Node* parm1 = nullptr,
Node* parm2 = nullptr, Node* parm3 = nullptr,
Node* parm4 = nullptr, Node* parm5 = nullptr,
Node* parm6 = nullptr, Node* parm7 = nullptr);
Node* sign_extend_byte(Node* in);
Node* sign_extend_short(Node* in);
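
A sketch of a make_runtime_call use consistent with the defaults above (the TypeFunc, entry address and parameter are placeholders):

    Node* call = kit.make_runtime_call(GraphKit::RC_LEAF,
                                       tf,          // assumed const TypeFunc*
                                       entry_addr,  // assumed address
                                       "example_leaf",
                                       nullptr,     // no memory effects
                                       parm0);      // parm1..parm7 stay nullptr
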
@ -826,8 +826,8 @@ class GraphKit : public Phase {
// Helper functions to build synchronizations
int next_monitor();
Node* insert_mem_bar(int opcode, Node* precedent = NULL);
Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
Node* insert_mem_bar(int opcode, Node* precedent = nullptr);
Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = nullptr);
// Optional 'precedent' is appended as an extra edge, to force ordering.
FastLockNode* shared_lock(Node* obj);
void shared_unlock(Node* box, Node* obj);
@ -842,7 +842,7 @@ class GraphKit : public Phase {
// Generate a check-cast idiom. Used by both the check-cast bytecode
// and the array-store bytecode
Node* gen_checkcast( Node *subobj, Node* superkls,
Node* *failure_control = NULL );
Node* *failure_control = nullptr );
Node* gen_subtype_check(Node* obj, Node* superklass);
@ -862,11 +862,11 @@ class GraphKit : public Phase {
bool deoptimize_on_exception=false);
Node* get_layout_helper(Node* klass_node, jint& constant_value);
Node* new_instance(Node* klass_node,
Node* slow_test = NULL,
Node* *return_size_val = NULL,
Node* slow_test = nullptr,
Node* *return_size_val = nullptr,
bool deoptimize_on_exception = false);
Node* new_array(Node* klass_node, Node* count_val, int nargs,
Node* *return_size_val = NULL,
Node* *return_size_val = nullptr,
bool deoptimize_on_exception = false);
// java.lang.String helpers

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,10 +79,10 @@ int IdealGraphPrinter::_file_count = 0;
IdealGraphPrinter *IdealGraphPrinter::printer() {
JavaThread *thread = JavaThread::current();
if (!thread->is_Compiler_thread()) return NULL;
if (!thread->is_Compiler_thread()) return nullptr;
CompilerThread *compiler_thread = (CompilerThread *)thread;
if (compiler_thread->ideal_graph_printer() == NULL) {
if (compiler_thread->ideal_graph_printer() == nullptr) {
IdealGraphPrinter *printer = new IdealGraphPrinter();
compiler_thread->set_ideal_graph_printer(printer);
}
@ -98,15 +98,15 @@ void IdealGraphPrinter::clean_up() {
if (printer) {
delete printer;
}
c->set_ideal_graph_printer(NULL);
c->set_ideal_graph_printer(nullptr);
}
}
IdealGraphPrinter* debug_file_printer = Compile::debug_file_printer();
if (debug_file_printer != NULL) {
if (debug_file_printer != nullptr) {
delete debug_file_printer;
}
IdealGraphPrinter* debug_network_printer = Compile::debug_network_printer();
if (debug_network_printer != NULL) {
if (debug_network_printer != nullptr) {
delete debug_network_printer;
}
}
@ -116,11 +116,11 @@ IdealGraphPrinter::IdealGraphPrinter() {
init(PrintIdealGraphFile, true, false);
}
// Either print methods to the specified file 'file_name' or if NULL over the network to the IGV. If 'append'
// Either print methods to the specified file 'file_name' or if null over the network to the IGV. If 'append'
// is set, the next phase is directly appended to the specified file 'file_name'. This is useful when doing
// replay compilation with a tool like rr that cannot alter the current program state but only the file.
IdealGraphPrinter::IdealGraphPrinter(Compile* compile, const char* file_name, bool append) {
assert(!append || (append && file_name != NULL), "can only use append flag when printing to file");
assert(!append || (append && file_name != nullptr), "can only use append flag when printing to file");
init(file_name, false, append);
C = compile;
if (append) {
@ -138,13 +138,13 @@ void IdealGraphPrinter::init(const char* file_name, bool use_multiple_files, boo
// appear in the dump.
_traverse_outs = true;
_should_send_method = true;
_output = NULL;
_output = nullptr;
buffer[0] = 0;
_depth = 0;
_current_method = NULL;
_network_stream = NULL;
_current_method = nullptr;
_network_stream = nullptr;
if (file_name != NULL) {
if (file_name != nullptr) {
init_file_stream(file_name, use_multiple_files, append);
} else {
init_network_stream();
@ -165,20 +165,20 @@ IdealGraphPrinter::~IdealGraphPrinter() {
if(_xml) {
delete _xml;
_xml = NULL;
_xml = nullptr;
}
if (_network_stream) {
delete _network_stream;
if (_network_stream == _output) {
_output = NULL;
_output = nullptr;
}
_network_stream = NULL;
_network_stream = nullptr;
}
if (_output) {
delete _output;
_output = NULL;
_output = nullptr;
}
}
@ -257,7 +257,7 @@ void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree
_xml->print_cr("]]>");
tail(BYTECODES_ELEMENT);
if (tree != NULL && tree->subtrees().length() > 0) {
if (tree != nullptr && tree->subtrees().length() > 0) {
head(INLINE_ELEMENT);
GrowableArray<InlineTree *> subtrees = tree->subtrees();
for (int i = 0; i < subtrees.length(); i++) {
@ -271,7 +271,7 @@ void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree
}
void IdealGraphPrinter::print_inline_tree(InlineTree *tree) {
if (tree != NULL) {
if (tree != nullptr) {
print_method(tree->method(), tree->caller_bci(), tree);
}
}
@ -281,7 +281,7 @@ void IdealGraphPrinter::print_inlining() {
// Print inline tree
if (_should_send_method) {
InlineTree *inlineTree = C->ilt();
if (inlineTree != NULL) {
if (inlineTree != nullptr) {
print_inline_tree(inlineTree);
} else {
// print this method only
@ -334,7 +334,7 @@ void IdealGraphPrinter::begin_method() {
// Has to be called whenever a method has finished compilation
void IdealGraphPrinter::end_method() {
tail(GROUP_ELEMENT);
_current_method = NULL;
_current_method = nullptr;
_xml->flush();
}
@ -381,14 +381,14 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
print_prop("debug_idx", node->_debug_idx);
#endif
if (C->cfg() != NULL) {
if (C->cfg() != nullptr) {
Block* block = C->cfg()->get_block_for_node(node);
if (block == NULL) {
if (block == nullptr) {
print_prop("block", C->cfg()->get_block(0)->_pre_order);
} else {
print_prop("block", block->_pre_order);
if (node == block->head()) {
if (block->_idom != NULL) {
if (block->_idom != nullptr) {
print_prop("idom", block->_idom->_pre_order);
}
print_prop("dom_depth", block->_dom_depth);
@ -427,7 +427,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
}
Node_Notes* nn = C->node_notes_at(node->_idx);
if (nn != NULL && !nn->is_clear() && nn->jvms() != NULL) {
if (nn != nullptr && !nn->is_clear() && nn->jvms() != nullptr) {
buffer[0] = 0;
stringStream ss(buffer, sizeof(buffer) - 1);
nn->jvms()->dump_spec(&ss);
@ -466,7 +466,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
print_prop("is_reduction", "true");
}
if (C->matcher() != NULL) {
if (C->matcher() != nullptr) {
if (C->matcher()->is_shared(node)) {
print_prop("is_shared", "true");
} else {
@ -478,7 +478,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
print_prop("is_dontcare", "false");
}
Node* old = C->matcher()->find_old_node(node);
if (old != NULL) {
if (old != nullptr) {
print_prop("old_node_idx", old->_idx);
}
}
@ -497,7 +497,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
stringStream s2(buffer, sizeof(buffer) - 1);
node->dump_spec(&s2);
if (t != NULL && (t->isa_instptr() || t->isa_instklassptr())) {
if (t != nullptr && (t->isa_instptr() || t->isa_instklassptr())) {
const TypeInstPtr *toop = t->isa_instptr();
const TypeInstKlassPtr *tkls = t->isa_instklassptr();
if (toop) {
@ -583,19 +583,19 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
}
}
JVMState* caller = NULL;
JVMState* caller = nullptr;
if (node->is_SafePoint()) {
caller = node->as_SafePoint()->jvms();
} else {
Node_Notes* notes = C->node_notes_at(node->_idx);
if (notes != NULL) {
if (notes != nullptr) {
caller = notes->jvms();
}
}
if (caller != NULL) {
if (caller != nullptr) {
stringStream bciStream;
ciMethod* last = NULL;
ciMethod* last = nullptr;
int last_bci;
while(caller) {
if (caller->has_method()) {
@ -606,13 +606,13 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
caller = caller->caller();
}
print_prop("bci", bciStream.freeze());
if (last != NULL && last->has_linenumber_table() && last_bci >= 0) {
if (last != nullptr && last->has_linenumber_table() && last_bci >= 0) {
print_prop("line", last->line_number_from_bci(last_bci));
}
}
#ifdef ASSERT
if (node->debug_orig() != NULL) {
if (node->debug_orig() != nullptr) {
stringStream dorigStream;
node->dump_orig(&dorigStream, false);
print_prop("debug_orig", dorigStream.freeze());
@ -643,12 +643,12 @@ void IdealGraphPrinter::print_field(const Node* node) {
stringStream ss(buffer, sizeof(buffer) - 1);
ciField* field = get_field(node);
uint depth = 0;
if (field == NULL) {
if (field == nullptr) {
depth++;
field = find_source_field_of_array_access(node, depth);
}
if (field != NULL) {
if (field != nullptr) {
// Either direct field access or array access
field->print_name_on(&ss);
for (uint i = 0; i < depth; i++) {
@ -665,34 +665,34 @@ void IdealGraphPrinter::print_field(const Node* node) {
ciField* IdealGraphPrinter::get_field(const Node* node) {
const TypePtr* adr_type = node->adr_type();
Compile::AliasType* atp = NULL;
Compile::AliasType* atp = nullptr;
if (C->have_alias_type(adr_type)) {
atp = C->alias_type(adr_type);
}
if (atp != NULL) {
if (atp != nullptr) {
ciField* field = atp->field();
if (field != NULL) {
if (field != nullptr) {
// Found field associated with 'node'.
return field;
}
}
return NULL;
return nullptr;
}
// Try to find the field that is associated with a memory node belonging to an array access.
ciField* IdealGraphPrinter::find_source_field_of_array_access(const Node* node, uint& depth) {
if (!node->is_Mem()) {
// Not an array access
return NULL;
return nullptr;
}
do {
if (node->adr_type() != NULL && node->adr_type()->isa_aryptr()) {
if (node->adr_type() != nullptr && node->adr_type()->isa_aryptr()) {
// Only process array accesses. Pattern match to find actual field source access.
node = get_load_node(node);
if (node != NULL) {
if (node != nullptr) {
ciField* field = get_field(node);
if (field != NULL) {
if (field != nullptr) {
return field;
}
// Could be a multi-dimensional array. Repeat loop.
@ -704,16 +704,16 @@ ciField* IdealGraphPrinter::find_source_field_of_array_access(const Node* node,
break;
} while (depth < 256); // Cannot have more than 255 dimensions
return NULL;
return nullptr;
}
// Pattern match on the inputs of 'node' to find load node for the field access.
Node* IdealGraphPrinter::get_load_node(const Node* node) {
Node* load = NULL;
Node* load = nullptr;
Node* addr = node->as_Mem()->in(MemNode::Address);
if (addr != NULL && addr->is_AddP()) {
if (addr != nullptr && addr->is_AddP()) {
Node* base = addr->as_AddP()->base_node();
if (base != NULL) {
if (base != nullptr) {
base = base->uncast();
if (base->is_Load()) {
// Mem(AddP([ConstraintCast*](LoadP))) for non-compressed oops.
@ -773,7 +773,7 @@ void IdealGraphPrinter::print_method(const char *name, int level) {
// Print current ideal graph
void IdealGraphPrinter::print(const char *name, Node *node) {
if (!_current_method || !_should_send_method || node == NULL) return;
if (!_current_method || !_should_send_method || node == nullptr) return;
// Warning, unsafe cast?
_chaitin = (PhaseChaitin *)C->regalloc();
@ -785,7 +785,7 @@ void IdealGraphPrinter::print(const char *name, Node *node) {
VectorSet temp_set;
head(NODES_ELEMENT);
if (C->cfg() != NULL) {
if (C->cfg() != nullptr) {
// Compute the maximum estimated frequency in the current graph.
_max_freq = 1.0e-6;
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
@ -801,7 +801,7 @@ void IdealGraphPrinter::print(const char *name, Node *node) {
head(EDGES_ELEMENT);
walk_nodes(node, true, &temp_set);
tail(EDGES_ELEMENT);
if (C->cfg() != NULL) {
if (C->cfg() != nullptr) {
head(CONTROL_FLOW_ELEMENT);
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
Block* block = C->cfg()->get_block(i);
@ -866,7 +866,7 @@ void IdealGraphPrinter::init_network_stream() {
tty->print_cr("Client available, but does not want to receive data!");
_network_stream->close();
delete _network_stream;
_network_stream = NULL;
_network_stream = nullptr;
return;
}
_output = _network_stream;
@ -879,11 +879,11 @@ void IdealGraphPrinter::init_network_stream() {
}
void IdealGraphPrinter::update_compiled_method(ciMethod* current_method) {
assert(C != NULL, "must already be set");
assert(C != nullptr, "must already be set");
if (current_method != _current_method) {
// If a different method, end the old and begin with the new one.
end_method();
_current_method = NULL;
_current_method = nullptr;
begin_method();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,7 +122,7 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
~IdealGraphPrinter();
public:
IdealGraphPrinter(Compile* compile, const char* file_name = NULL, bool append = false);
IdealGraphPrinter(Compile* compile, const char* file_name = nullptr, bool append = false);
static void clean_up();
static IdealGraphPrinter *printer();
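
A hedged example of the constructor contract above (the Compile* and file name are assumed):

    // Dump phases of 'compile' to a single file, appending each phase;
    // passing nullptr as the file name would stream to IGV instead.
    IdealGraphPrinter* p = new IdealGraphPrinter(compile, "replay.xml", /*append=*/ true);
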

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,9 +45,9 @@ IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarati
_initial_i_o = gkit->i_o();
_delay_all_transforms = delay_all_transforms;
_var_ct = 0;
_cvstate = NULL;
_cvstate = nullptr;
// We can go memory state free or else we need the entire memory state
assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
assert(_initial_memory == nullptr || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
int init_size = 5;
_pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
@ -73,11 +73,11 @@ void IdealKit::if_then(Node* left, BoolTest::mask relop,
Node* right, float prob, float cnt, bool push_new_state) {
assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new If");
Node* bol;
if (left->bottom_type()->isa_ptr() == NULL) {
if (left->bottom_type()->isa_int() != NULL) {
if (left->bottom_type()->isa_ptr() == nullptr) {
if (left->bottom_type()->isa_int() != nullptr) {
bol = Bool(CmpI(left, right), relop);
} else {
assert(left->bottom_type()->isa_long() != NULL, "what else?");
assert(left->bottom_type()->isa_long() != nullptr, "what else?");
bol = Bool(CmpL(left, right), relop);
}
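
An illustrative IdealKit fragment matching the type dispatch above (a sketch; 'kit' and 'len' are assumed):

    IdealKit ideal(&kit);
    ideal.if_then(len, BoolTest::gt, ideal.ConI(0)); {
      // nodes for the taken branch are emitted here
    } ideal.end_if();
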
@ -202,7 +202,7 @@ void IdealKit::end_loop() {
// must be specified (which should be 1 less than
// the number of predecessors.)
Node* IdealKit::make_label(int goto_ct) {
assert(_cvstate != NULL, "must declare variables before labels");
assert(_cvstate != nullptr, "must declare variables before labels");
Node* lab = new_cvstate();
int sz = 1 + goto_ct + 1 /* fall thru */;
Node* reg = delay_transform(new RegionNode(sz));
@ -228,7 +228,7 @@ void IdealKit::goto_(Node* lab, bool bind) {
Node* reg = lab->in(TypeFunc::Control);
// find next empty slot in region
uint slot = 1;
while (slot < reg->req() && reg->in(slot) != NULL) slot++;
while (slot < reg->req() && reg->in(slot) != nullptr) slot++;
assert(slot < reg->req(), "too many gotos");
// If this is last predecessor, then don't force phi creation
if (slot == reg->req() - 1) bind = false;
@ -245,9 +245,9 @@ void IdealKit::goto_(Node* lab, bool bind) {
// Get the current value of the var
Node* m = _cvstate->in(i);
// If the var went unused no need for a phi
if (m == NULL) {
if (m == nullptr) {
continue;
} else if (l == NULL || m == l) {
} else if (l == nullptr || m == l) {
// Only one unique value "m" is known to reach this label so a phi
// is not yet necessary unless:
// the label is being bound and all predecessors have not been seen,
@ -326,7 +326,7 @@ Node* IdealKit::copy_cvstate() {
//-----------------------------clear-----------------------------------
void IdealKit::clear(Node* m) {
for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
for (uint i = 0; i < m->req(); i++) m->set_req(i, nullptr);
}
//-----------------------------IdealVariable----------------------------
@ -356,7 +356,7 @@ Node* IdealKit::load(Node* ctl,
LoadNode::ControlDependency control_dependency) {
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = NULL; // debug-mode-only argument
const TypePtr* adr_type = nullptr; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access);
@ -368,7 +368,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
MemNode::MemOrd mo, bool require_atomic_access,
bool mismatched) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
const TypePtr* adr_type = NULL;
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
@ -387,7 +387,7 @@ Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store, int oo
BasicType bt,
int adr_idx) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
const TypePtr* adr_type = NULL;
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
@ -411,11 +411,11 @@ void IdealKit::do_memory_merge(Node* merging, Node* join) {
// Get the region for the join state
Node* join_region = join->in(TypeFunc::Control);
assert(join_region != NULL, "join region must exist");
if (join->in(TypeFunc::I_O) == NULL ) {
assert(join_region != nullptr, "join region must exist");
if (join->in(TypeFunc::I_O) == nullptr ) {
join->set_req(TypeFunc::I_O, merging->in(TypeFunc::I_O));
}
if (join->in(TypeFunc::Memory) == NULL ) {
if (join->in(TypeFunc::Memory) == nullptr ) {
join->set_req(TypeFunc::Memory, merging->in(TypeFunc::Memory));
return;
}
@ -503,10 +503,10 @@ Node* IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
call->init_req( TypeFunc::ReturnAdr, top() );
if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3);
if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != nullptr) call->init_req(TypeFunc::Parms+3, parm3);
// Node *c = _gvn.transform(call);
call = (CallNode *) _gvn.transform(call);
@ -524,7 +524,7 @@ Node* IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
"call node must be constructed correctly");
Node* res = NULL;
Node* res = nullptr;
if (slow_call_type->range()->cnt() > TypeFunc::Parms) {
assert(slow_call_type->range()->cnt() == TypeFunc::Parms+1, "only one return value");
res = transform(new ProjNode(call, TypeFunc::Parms));
@ -555,10 +555,10 @@ void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type,
call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
call->init_req( TypeFunc::ReturnAdr, top() );
if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3);
if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != nullptr) call->init_req(TypeFunc::Parms+3, parm3);
// Node *c = _gvn.transform(call);
call = (CallNode *) _gvn.transform(call);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -159,7 +159,7 @@ class IdealKit: public StackObj {
void set_i_o(Node* c) { _cvstate->set_req(TypeFunc::I_O, c); }
void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); }
void dead(IdealVariable& v) { set(v, (Node*)NULL); }
void dead(IdealVariable& v) { set(v, (Node*)nullptr); }
void if_then(Node* left, BoolTest::mask relop, Node* right,
float prob = PROB_FAIR, float cnt = COUNT_UNKNOWN,
bool push_new_state = true);
@ -248,9 +248,9 @@ class IdealKit: public StackObj {
address slow_call,
const char *leaf_name,
Node* parm0,
Node* parm1 = NULL,
Node* parm2 = NULL,
Node* parm3 = NULL);
Node* parm1 = nullptr,
Node* parm2 = nullptr,
Node* parm3 = nullptr);
void make_leaf_call_no_fp(const TypeFunc *slow_call_type,
address slow_call,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -819,7 +819,7 @@ void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, P
}
void PhaseChaitin::print_pressure_info(Pressure& pressure, const char *str) {
if (str != NULL) {
if (str != nullptr) {
tty->print_cr("# *** %s ***", str);
}
tty->print_cr("# start pressure is = %d", pressure.start_pressure());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,28 +79,28 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// Look for a compare of a constant and a merged value
Node *i1 = iff->in(1);
if( !i1->is_Bool() ) return NULL;
if( !i1->is_Bool() ) return nullptr;
BoolNode *b = i1->as_Bool();
Node *cmp = b->in(1);
if( !cmp->is_Cmp() ) return NULL;
if( !cmp->is_Cmp() ) return nullptr;
i1 = cmp->in(1);
if( i1 == NULL || !i1->is_Phi() ) return NULL;
if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
PhiNode *phi = i1->as_Phi();
Node *con2 = cmp->in(2);
if( !con2->is_Con() ) return NULL;
if( !con2->is_Con() ) return nullptr;
// See that the merge point contains some constants
Node *con1=NULL;
Node *con1=nullptr;
uint i4;
for( i4 = 1; i4 < phi->req(); i4++ ) {
con1 = phi->in(i4);
if( !con1 ) return NULL; // Do not optimize partially collapsed merges
if( !con1 ) return nullptr; // Do not optimize partially collapsed merges
if( con1->is_Con() ) break; // Found a constant
// Also allow null-vs-not-null checks
const TypePtr *tp = igvn->type(con1)->isa_ptr();
if( tp && tp->_ptr == TypePtr::NotNull )
break;
}
if( i4 >= phi->req() ) return NULL; // Found no constants
if( i4 >= phi->req() ) return nullptr; // Found no constants
igvn->C->set_has_split_ifs(true); // Has chance for split-if
@ -111,18 +111,18 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
const Type *t = cmp2->Value(igvn);
// This compare is dead, so whack it!
igvn->remove_dead_node(cmp2);
if( !t->singleton() ) return NULL;
if( !t->singleton() ) return nullptr;
// No intervening control, like a simple Call
Node* r = iff->in(0);
if (!r->is_Region() || r->is_Loop() || phi->region() != r || r->as_Region()->is_copy()) {
return NULL;
return nullptr;
}
// No other users of the cmp/bool
if (b->outcnt() != 1 || cmp->outcnt() != 1) {
//tty->print_cr("many users of cmp/bool");
return NULL;
return nullptr;
}
// Make sure we can determine where all the uses of merged values go
@ -139,13 +139,13 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
tty->print_cr("Region has odd use");
u->dump(2);
}*/
return NULL;
return nullptr;
}
if( u != phi ) {
// CNC - do not allow any other merged value
//tty->print_cr("Merging another value");
//u->dump(2);
return NULL;
return nullptr;
}
// Make sure we can account for all Phi uses
for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
@ -157,8 +157,8 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// If the cast is derived from data flow edges, it may not have a control edge.
// If so, it should be safe to split. But follow-up code can not deal with
// this (l. 359). So skip.
if (v->in(0) == NULL) {
return NULL;
if (v->in(0) == nullptr) {
return nullptr;
}
if (v->in(0)->in(0) == iff) {
continue; // CastPP/II of the IfNode is OK
@ -167,9 +167,9 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// Disabled following code because I cannot tell if exactly one
// path dominates without a real dominator check. CNC 9/9/1999
//uint vop = v->Opcode();
//if( vop == Op_Phi ) { // Phi from another merge point might be OK
// Node *r = v->in(0); // Get controlling point
// if( !r ) return NULL; // Degraded to a copy
//if( vop == Op_Phi ) { // Phi from another merge point might be OK
// Node *r = v->in(0); // Get controlling point
// if( !r ) return nullptr; // Degraded to a copy
// // Find exactly one path in (either True or False doms, but not IFF)
// int cnt = 0;
// for( uint i = 1; i < r->req(); i++ )
@ -190,7 +190,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
}
*/
}
return NULL;
return nullptr;
/* CNC - Cut out all the fancy acceptance tests
// Can we clone this use when doing the transformation?
@ -198,14 +198,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
if( !v->in(0) && v != cmp ) {
tty->print_cr("Phi has free-floating use");
v->dump(2);
return NULL;
return nullptr;
}
for( uint l = 1; l < v->req(); l++ ) {
if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
!v->in(l)->is_Con() ) {
tty->print_cr("Phi has use");
v->dump(2);
return NULL;
return nullptr;
} // End of if Phi-use input is neither Phi nor Constant
} // End of for all inputs to Phi-use
*/
@ -214,7 +214,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// Only do this if the IF node is in a sane state
if (iff->outcnt() != 2)
return NULL;
return nullptr;
// Got a hit! Do the Mondo Hack!
//
@ -243,17 +243,17 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
req_c++;
}
Node* proj = PhaseIdealLoop::find_predicate(r->in(ii));
if (proj != NULL) {
if (proj != nullptr) {
// Bail out if splitting through a region with a predicate input (could
// also be a loop header before loop opts creates a LoopNode for it).
return NULL;
return nullptr;
}
}
// If all the defs of the phi are the same constant, we already have the desired end state.
// Skip the split that would create empty phi and region nodes.
if ((r->req() - req_c) == 1) {
return NULL;
return nullptr;
}
// At this point we know that we can apply the split if optimization. If the region is still on the worklist,
@ -261,7 +261,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
if (igvn->_worklist.member(r)) {
igvn->_worklist.push(iff); // retry split if later again
return NULL;
return nullptr;
}
Node *region_c = new RegionNode(req_c + 1);
@ -336,17 +336,17 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
igvn->register_new_node_with_optimizer( region_f );
igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
cmp->set_req(1,NULL); // Whack the inputs to cmp because it will be dead
cmp->set_req(2,NULL);
cmp->set_req(1,nullptr); // Whack the inputs to cmp because it will be dead
cmp->set_req(2,nullptr);
// Check for all uses of the Phi and give them a new home.
// The 'cmp' got cloned, but CastPP/IIs need to be moved.
Node *phi_s = NULL; // do not construct unless needed
Node *phi_f = NULL; // do not construct unless needed
Node *phi_s = nullptr; // do not construct unless needed
Node *phi_f = nullptr; // do not construct unless needed
for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
Node* v = phi->last_out(i2);// User of the phi
igvn->rehash_node_delayed(v); // Have to fixup other Phi users
uint vop = v->Opcode();
Node *proj = NULL;
Node *proj = nullptr;
if( vop == Op_Phi ) { // Remote merge point
Node *r = v->in(0);
for (uint i3 = 1; i3 < r->req(); i3++)
@ -359,11 +359,11 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
} else {
assert( 0, "do not know how to handle this guy" );
}
guarantee(proj != NULL, "sanity");
guarantee(proj != nullptr, "sanity");
Node *proj_path_data, *proj_path_ctrl;
if( proj->Opcode() == Op_IfTrue ) {
if( phi_s == NULL ) {
if( phi_s == nullptr ) {
// Only construct phi_s if needed, otherwise provides
// interfering use.
phi_s = PhiNode::make_blank(region_s,phi);
@ -375,7 +375,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
proj_path_data = phi_s;
proj_path_ctrl = region_s;
} else {
if( phi_f == NULL ) {
if( phi_f == nullptr ) {
// Only construct phi_f if needed, otherwise provides
// interfering use.
phi_f = PhiNode::make_blank(region_f,phi);
@ -431,7 +431,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
Node* u = r->last_out(l);
if( u == r ) {
r->set_req(0, NULL);
r->set_req(0, nullptr);
} else {
assert(u->outcnt() == 0, "only dead users");
igvn->remove_dead_node(u);
@ -452,14 +452,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// for the failed path
ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
if (outcnt() != 2) {
return NULL;
return nullptr;
}
Node* b = in(1);
if (b == NULL || !b->is_Bool()) return NULL;
if (b == nullptr || !b->is_Bool()) return nullptr;
BoolNode* bn = b->as_Bool();
Node* cmp = bn->in(1);
if (cmp == NULL) return NULL;
if (cmp->Opcode() != Op_CmpU) return NULL;
if (cmp == nullptr) return nullptr;
if (cmp->Opcode() != Op_CmpU) return nullptr;
l = cmp->in(1);
r = cmp->in(2);
@ -469,10 +469,10 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
r = cmp->in(1);
flip_test = 2;
} else if (bn->_test._test != BoolTest::lt) {
return NULL;
return nullptr;
}
if (l->is_top()) return NULL; // Top input means dead test
if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return NULL;
if (l->is_top()) return nullptr; // Top input means dead test
if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return nullptr;
// We have recognized one of these forms:
// Flip 1: If (Bool[<] CmpU(l, LoadRange)) ...
@ -485,15 +485,15 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
//------------------------------is_range_check---------------------------------
// Return 0 if not a range check. Return 1 if a range check and set index and
// offset. Return 2 if we had to negate the test. Index is NULL if the check
// offset. Return 2 if we had to negate the test. Index is null if the check
// is versus a constant.
int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
int flip_test = 0;
Node* l = NULL;
Node* r = NULL;
Node* l = nullptr;
Node* r = nullptr;
ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
if (iftrap == NULL) {
if (iftrap == nullptr) {
return 0;
}
@ -501,7 +501,7 @@ int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
// along the OOB path. Otherwise, it's possible that the user wrote
// something which optimized to look like a range check but behaves
// in some other way.
if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == NULL) {
if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
return 0;
}
@ -518,7 +518,7 @@ int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
}
} else if ((off = l->find_int_con(-1)) >= 0) {
// constant offset with no variable index
ind = NULL;
ind = nullptr;
} else {
// variable index with no constant offset (or dead negative index)
off = 0;
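
How a caller decodes the is_range_check return convention above (hypothetical sketch; 'rc' is an assumed RangeCheckNode*):

    Node* range = nullptr;
    Node* index = nullptr;
    jint offset = 0;
    int flavor = rc->is_range_check(range, index, offset);
    if (flavor != 0) {
      // flavor == 2 means the test was negated; index == nullptr means
      // the check is against a constant, left in 'offset'.
    }
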
@ -563,7 +563,7 @@ static void adjust_check(Node* proj, Node* range, Node* index,
}
//------------------------------up_one_dom-------------------------------------
// Walk up the dominator tree one step. Return NULL at root or true
// Walk up the dominator tree one step. Return null at root or true
// complex merges. Skips through small diamonds.
Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
Node *dom = curr->in(0);
@ -576,10 +576,10 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
// Use linear_only if we are still parsing, since we cannot
// trust the regions to be fully filled in.
if (linear_only)
return NULL;
return nullptr;
if( dom->is_Root() )
return NULL;
return nullptr;
// Else hit a Region. Check for a loop header
if( dom->is_Loop() )
@ -598,12 +598,12 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
if( din4->is_Call() && // Handle a slow-path call on either arm
(din4 = din4->in(0)) )
din4 = din4->in(0);
if (din3 != NULL && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
return din3; // Skip around diamonds
}
// Give up the search at true merges
return NULL; // Dead loop? Or hit root?
return nullptr; // Dead loop? Or hit root?
}
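
A typical dominator walk built on up_one_dom (a sketch; the iteration bound and search predicate are illustrative):

    Node* dom = up_one_dom(in(0));
    for (int i = 0; i < 100 && dom != nullptr; i++) {
      // inspect 'dom' here for an identical dominating test
      dom = up_one_dom(dom);
    }
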
@ -620,7 +620,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj
const CmpNode* cmp = bol->in(1)->as_Cmp();
if (cmp->in(1) == val) {
const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
if (cmp2_t != NULL) {
if (cmp2_t != nullptr) {
jint lo = cmp2_t->_lo;
jint hi = cmp2_t->_hi;
BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
@ -628,7 +628,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj
case BoolTest::ne: {
// If val is compared to its lower or upper bound, we can narrow the type
const TypeInt* val_t = gvn->type(val)->isa_int();
if (val_t != NULL && !val_t->singleton() && cmp2_t->is_con()) {
if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
if (val_t->_lo == lo) {
return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
} else if (val_t->_hi == hi) {
@ -636,7 +636,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj
}
}
// Can't refine type
return NULL;
return nullptr;
}
case BoolTest::eq:
return cmp2_t;
@ -669,7 +669,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj
}
}
}
return NULL;
return nullptr;
}
//------------------------------fold_compares----------------------------
@ -712,11 +712,11 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj
// Is the comparison for this If suitable for folding?
bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
return in(1) != NULL &&
return in(1) != nullptr &&
in(1)->is_Bool() &&
in(1)->in(1) != NULL &&
in(1)->in(1) != nullptr &&
in(1)->in(1)->Opcode() == Op_CmpI &&
in(1)->in(1)->in(2) != NULL &&
in(1)->in(1)->in(2) != nullptr &&
in(1)->in(1)->in(2) != igvn->C->top() &&
(in(1)->as_Bool()->_test.is_less() ||
in(1)->as_Bool()->_test.is_greater() ||
@ -725,14 +725,14 @@ bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
// Is a dominating control suitable for folding with this if?
bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
return ctrl != NULL &&
return ctrl != nullptr &&
ctrl->is_Proj() &&
ctrl->in(0) != NULL &&
ctrl->in(0) != nullptr &&
ctrl->in(0)->Opcode() == Op_If &&
ctrl->in(0)->outcnt() == 2 &&
ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
// Must compare same value
ctrl->in(0)->in(1)->in(1)->in(1) != NULL &&
ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
}
@ -741,23 +741,23 @@ bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
ProjNode* otherproj = proj->other_if_proj();
Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
RegionNode* region = (otherproj_ctrl_use != NULL && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : NULL;
success = NULL;
fail = NULL;
RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
success = nullptr;
fail = nullptr;
if (otherproj->outcnt() == 1 && region != NULL && !region->has_phi()) {
if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
for (int i = 0; i < 2; i++) {
ProjNode* proj = proj_out(i);
if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) {
if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) {
success = proj;
} else if (fail == NULL) {
} else if (fail == nullptr) {
fail = proj;
} else {
success = fail = NULL;
success = fail = nullptr;
}
}
}
return success != NULL && fail != NULL;
return success != nullptr && fail != nullptr;
}
bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
@ -772,11 +772,11 @@ bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* u
// that the call stacks are equal for both JVMStates.
JVMState* dom_caller = dom_unc->jvms()->caller();
JVMState* caller = unc->jvms()->caller();
if ((dom_caller == NULL) != (caller == NULL)) {
if ((dom_caller == nullptr) != (caller == nullptr)) {
// The current method must either be inlined into both dom_caller and
// caller or must not be inlined at all (top method). Bail out otherwise.
return false;
} else if (dom_caller != NULL && !dom_caller->same_calls_as(caller)) {
} else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
return false;
}
// Check that the bci of the dominating uncommon trap dominates the bci
@ -796,11 +796,11 @@ bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* u
ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const {
for (int i = 0; i < 2; i++) {
call = proj_out(i)->is_uncommon_trap_proj(Deoptimization::Reason_none);
if (call != NULL) {
if (call != nullptr) {
return proj_out(i);
}
}
return NULL;
return nullptr;
}
// Do this If and the dominating If both branch out to an uncommon trap
@ -808,22 +808,22 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod
ProjNode* otherproj = proj->other_if_proj();
CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);
if (otherproj->outcnt() == 1 && dom_unc != NULL) {
if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
// We need to re-execute the folded Ifs after deoptimization from the merged traps
if (!dom_unc->jvms()->should_reexecute()) {
return false;
}
CallStaticJavaNode* unc = NULL;
CallStaticJavaNode* unc = nullptr;
ProjNode* unc_proj = uncommon_trap_proj(unc);
if (unc_proj != NULL && unc_proj->outcnt() == 1) {
if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
if (dom_unc == unc) {
// Allow the uncommon trap to be shared through a region
RegionNode* r = unc->in(0)->as_Region();
if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
return false;
}
assert(r->has_phi() == NULL, "simple region shouldn't have a phi");
assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
} else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
return false;
}
@ -893,8 +893,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
// Figure out which of the two tests sets the upper bound and which
// sets the lower bound if any.
Node* adjusted_lim = NULL;
if (lo_type != NULL && hi_type != NULL && hi_type->_lo > lo_type->_hi &&
Node* adjusted_lim = nullptr;
if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
assert((dom_bool->_test.is_less() && !proj->_con) ||
(dom_bool->_test.is_greater() && proj->_con), "incorrect test");
@ -939,7 +939,7 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
}
// this test was canonicalized
assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
} else if (lo_type != NULL && hi_type != NULL && lo_type->_lo > hi_type->_hi &&
} else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {
// this_bool = <
@ -999,9 +999,9 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
} else {
const TypeInt* failtype = filtered_int_type(igvn, n, proj);
if (failtype != NULL) {
if (failtype != nullptr) {
const TypeInt* type2 = filtered_int_type(igvn, n, fail);
if (type2 != NULL) {
if (type2 != nullptr) {
failtype = failtype->join(type2)->is_int();
if (failtype->_lo > failtype->_hi) {
// previous if determines the result of this if so
@ -1011,8 +1011,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
}
}
}
lo = NULL;
hi = NULL;
lo = nullptr;
hi = nullptr;
}
if (lo && hi) {
@ -1020,7 +1020,7 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
hook->init_req(0, lo); // Add a use to lo to prevent him from dying
// Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
Node* adjusted_val = igvn->transform(new SubINode(n, lo));
if (adjusted_lim == NULL) {
if (adjusted_lim == nullptr) {
adjusted_lim = igvn->transform(new SubINode(hi, lo));
}
hook->destruct(igvn);
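
A worked example of the merged-compare arithmetic above: two signed bound checks collapse into the single unsigned compare (CmpU (n - lo) (hi - lo)) that this code builds (illustrative C++):

    // if (n >= lo && n < hi)  ==>  one unsigned comparison
    bool in_range(jint n, jint lo, jint hi) {
      return (juint)(n - lo) < (juint)(hi - lo); // single CmpU
    }
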
@ -1094,10 +1094,10 @@ Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode*
Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
int flip_test = 0;
Node* l = NULL;
Node* r = NULL;
Node* l = nullptr;
Node* r = nullptr;
if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != NULL) {
if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
// If this looks like a range check, change the trap to
// Reason_range_check so the compiler recognizes it as a range
// check and applies the corresponding optimizations
@ -1152,7 +1152,7 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV
}
} else if (use->is_Mem()) {
Node* ctrl = use->in(0);
for (int i = 0; i < 10 && ctrl != NULL && ctrl != fail; i++) {
for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
ctrl = up_one_dom(ctrl);
}
if (ctrl == fail) {
@ -1182,7 +1182,7 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV
}
}
}
} else if (use->in(0) == NULL && (igvn->type(use)->isa_long() ||
} else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
igvn->type(use)->isa_ptr())) {
stack.set_index(i+1);
stack.push(use, 0);
@ -1197,16 +1197,16 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV
}
bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
if (in(1) != NULL &&
in(1)->in(1) != NULL &&
in(1)->in(1)->in(2) != NULL) {
if (in(1) != nullptr &&
in(1)->in(1) != nullptr &&
in(1)->in(1)->in(2) != nullptr) {
Node* other = in(1)->in(1)->in(2);
if (other->Opcode() == Op_LoadRange &&
((other->in(0) != NULL && other->in(0) == proj) ||
(other->in(0) == NULL &&
other->in(2) != NULL &&
((other->in(0) != nullptr && other->in(0) == proj) ||
(other->in(0) == nullptr &&
other->in(2) != nullptr &&
other->in(2)->is_AddP() &&
other->in(2)->in(1) != NULL &&
other->in(2)->in(1) != nullptr &&
other->in(2)->in(1)->Opcode() == Op_CastPP &&
other->in(2)->in(1)->in(0) == proj))) {
return true;
@ -1217,12 +1217,12 @@ bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
Node* other = in(1)->in(1)->in(2);
if (other->in(MemNode::Address) != NULL &&
proj->in(0)->in(1) != NULL &&
if (other->in(MemNode::Address) != nullptr &&
proj->in(0)->in(1) != nullptr &&
proj->in(0)->in(1)->is_Bool() &&
proj->in(0)->in(1)->in(1) != NULL &&
proj->in(0)->in(1)->in(1) != nullptr &&
proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
proj->in(0)->in(1)->in(1)->in(2) != NULL &&
proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
return true;
@ -1233,17 +1233,17 @@ bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
// Check that the If that is in between the 2 integer comparisons has
// no side effect
bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
if (proj == NULL) {
if (proj == nullptr) {
return false;
}
CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
if (unc != NULL && proj->outcnt() <= 2) {
if (unc != nullptr && proj->outcnt() <= 2) {
if (proj->outcnt() == 1 ||
// Allow simple null check from LoadRange
(is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
assert(dom_unc != NULL, "is_uncommon_trap_if_pattern returned NULL");
assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");
// reroute_side_effect_free_unc changes the state of this
// uncommon trap to restart execution at the previous
@ -1298,15 +1298,15 @@ void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, Ph
}
Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
if (Opcode() != Op_If) return NULL;
if (Opcode() != Op_If) return nullptr;
if (cmpi_folds(igvn)) {
Node* ctrl = in(0);
if (is_ctrl_folds(ctrl, igvn) && ctrl->outcnt() == 1) {
// An integer comparison immediately dominated by another integer
// comparison
ProjNode* success = NULL;
ProjNode* fail = NULL;
ProjNode* success = nullptr;
ProjNode* fail = nullptr;
ProjNode* dom_cmp = ctrl->as_Proj();
if (has_shared_region(dom_cmp, success, fail) &&
// Next call modifies graph so must be last
@ -1318,11 +1318,11 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
fold_compares_helper(dom_cmp, success, fail, igvn)) {
return merge_uncommon_traps(dom_cmp, success, fail, igvn);
}
return NULL;
} else if (ctrl->in(0) != NULL &&
ctrl->in(0)->in(0) != NULL) {
ProjNode* success = NULL;
ProjNode* fail = NULL;
return nullptr;
} else if (ctrl->in(0) != nullptr &&
ctrl->in(0)->in(0) != nullptr) {
ProjNode* success = nullptr;
ProjNode* fail = nullptr;
Node* dom = ctrl->in(0)->in(0);
ProjNode* dom_cmp = dom->isa_Proj();
ProjNode* other_cmp = ctrl->isa_Proj();
@ -1339,7 +1339,7 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
}
}
}
return NULL;
return nullptr;
}
//------------------------------remove_useless_bool----------------------------
@ -1348,33 +1348,33 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
// Replace with if( x < y ) { ... }
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
Node *i1 = iff->in(1);
if( !i1->is_Bool() ) return NULL;
if( !i1->is_Bool() ) return nullptr;
BoolNode *bol = i1->as_Bool();
Node *cmp = bol->in(1);
if( cmp->Opcode() != Op_CmpI ) return NULL;
if( cmp->Opcode() != Op_CmpI ) return nullptr;
// Must be comparing against a bool
const Type *cmp2_t = phase->type( cmp->in(2) );
if( cmp2_t != TypeInt::ZERO &&
cmp2_t != TypeInt::ONE )
return NULL;
return nullptr;
// Find a prior merge point merging the boolean
i1 = cmp->in(1);
if( !i1->is_Phi() ) return NULL;
if( !i1->is_Phi() ) return nullptr;
PhiNode *phi = i1->as_Phi();
if( phase->type( phi ) != TypeInt::BOOL )
return NULL;
return nullptr;
// Check for diamond pattern
int true_path = phi->is_diamond_phi();
if( true_path == 0 ) return NULL;
if( true_path == 0 ) return nullptr;
// Make sure that iff and the control of the phi are different. This
// should really only happen for dead control flow since it requires
// an illegal cycle.
if (phi->in(0)->in(1)->in(0) == iff) return NULL;
if (phi->in(0)->in(1)->in(0) == iff) return nullptr;
// phi->region->if_proj->ifnode->bool->cmp
BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
@ -1383,19 +1383,19 @@ static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
// either iff2->in(1) or its complement.
int flip = 0;
if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
else if( bol->_test._test != BoolTest::eq ) return NULL;
else if( bol->_test._test != BoolTest::eq ) return nullptr;
if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;
const Type *phi1_t = phase->type( phi->in(1) );
const Type *phi2_t = phase->type( phi->in(2) );
// Check for Phi(0,1) and flip
if( phi1_t == TypeInt::ZERO ) {
if( phi2_t != TypeInt::ONE ) return NULL;
if( phi2_t != TypeInt::ONE ) return nullptr;
flip = 1-flip;
} else {
// Check for Phi(1,0)
if( phi1_t != TypeInt::ONE ) return NULL;
if( phi2_t != TypeInt::ZERO ) return NULL;
if( phi1_t != TypeInt::ONE ) return nullptr;
if( phi2_t != TypeInt::ZERO ) return nullptr;
}
if( true_path == 2 ) {
flip = 1-flip;
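Sketched at the source level, the idiom removed here looks like this (illustrative; x, y and res are placeholders):

int res;
if (x < y) res = 1; else res = 0;  // diamond Phi(1,0) materializes the bool
if (res != 0) { /* taken */ }      // CmpI immediately re-tests it
// rewritten, with 'flip' accounting for eq/ne and the Phi input order, to:
if (x < y) { /* taken */ }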
@ -1419,25 +1419,25 @@ struct RangeCheck {
Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// No Def-Use info?
if (!can_reshape) return NULL;
if (!can_reshape) return nullptr;
// Don't bother trying to transform a dead if
if (in(0)->is_top()) return NULL;
if (in(0)->is_top()) return nullptr;
// Don't bother trying to transform an if with a dead test
if (in(1)->is_top()) return NULL;
if (in(1)->is_top()) return nullptr;
// Another variation of a dead test
if (in(1)->is_Con()) return NULL;
if (in(1)->is_Con()) return nullptr;
// Another variation of a dead if
if (outcnt() < 2) return NULL;
if (outcnt() < 2) return nullptr;
// Canonicalize the test.
Node* idt_if = idealize_test(phase, this);
if (idt_if != NULL) return idt_if;
if (idt_if != nullptr) return idt_if;
// Try to split the IF
PhaseIterGVN *igvn = phase->is_IterGVN();
Node *s = split_if(this, igvn);
if (s != NULL) return s;
if (s != nullptr) return s;
return NodeSentinel;
}
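Note the three-way result: nullptr means no progress, a node is a finished transformation, and NodeSentinel tells the subclass to keep going. A minimal sketch of the assumed caller pattern (compare IfNode::Ideal below):

Node* IfNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) return res;  // nullptr or a real transformation
  // ... If-specific idealizations continue ...
}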
@ -1457,11 +1457,11 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* bol2 = remove_useless_bool(this, phase);
if (bol2) return bol2;
if (in(0) == NULL) return NULL; // Dead loop?
if (in(0) == nullptr) return nullptr; // Dead loop?
PhaseIterGVN* igvn = phase->is_IterGVN();
Node* result = fold_compares(igvn);
if (result != NULL) {
if (result != nullptr) {
return result;
}
@ -1470,7 +1470,7 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (is_If() && in(1)->is_Bool()) {
Node* cmp = in(1)->in(1);
if (cmp->Opcode() == Op_CmpP &&
cmp->in(2) != NULL && // make sure cmp is not already dead
cmp->in(2) != nullptr && // make sure cmp is not already dead
cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) {
dist = 64; // Limit for null-pointer scans
}
@ -1478,7 +1478,7 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* prev_dom = search_identical(dist);
if (prev_dom != NULL) {
if (prev_dom != nullptr) {
// Replace dominated IfNode
return dominated_by(prev_dom, igvn);
}
@ -1504,8 +1504,8 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
// be skipped. For example, range check predicate has two checks
// for lower and upper bounds.
ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL) {
if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr ||
unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr) {
prev_dom = idom;
}
@ -1560,21 +1560,21 @@ Node* IfNode::search_identical(int dist) {
while (dom->Opcode() != op || // Not same opcode?
dom->in(1) != in(1) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;
if (dist < 0) return nullptr;
dist--;
prev_dom = dom;
dom = up_one_dom(dom);
if (!dom) return NULL;
if (!dom) return nullptr;
}
// Check that we did not follow a loop back to ourselves
if (this == dom) {
return NULL;
return nullptr;
}
#ifndef PRODUCT
if (dist > 2) { // Add to count of NULL checks elided
if (dist > 2) { // Add to count of null checks elided
explicit_null_checks_elided++;
}
#endif
@ -1618,26 +1618,26 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
Node* pre = in(0);
if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
return NULL;
return nullptr;
}
Node* dom = pre->in(0);
if (!dom->is_If()) {
return NULL;
return nullptr;
}
Node* bol = in(1);
if (!bol->is_Bool()) {
return NULL;
return nullptr;
}
Node* cmp = in(1)->in(1);
if (!cmp->is_Cmp()) {
return NULL;
return nullptr;
}
if (!dom->in(1)->is_Bool()) {
return NULL;
return nullptr;
}
if (dom->in(1)->in(1) != cmp) { // Not same cond?
return NULL;
return nullptr;
}
int drel = subsuming_bool_test_encode(dom->in(1));
@ -1645,11 +1645,11 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
int bout = pre->is_IfFalse() ? 1 : 0;
if (drel < 0 || trel < 0) {
return NULL;
return nullptr;
}
int br = s_short_circuit_map[trel][2*drel+bout];
if (br == na) {
return NULL;
return nullptr;
}
#ifndef PRODUCT
if (TraceIterativeGVN) {
@ -1726,7 +1726,7 @@ Node* IfProjNode::Identity(PhaseGVN* phase) {
// CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to
// avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization.
Node* head = unique_ctrl_out_or_null();
if (head != NULL && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
phase->is_IterGVN()->replace_node(head, new_head);
@ -1751,27 +1751,27 @@ void IfNode::dump_spec(outputStream *st) const {
// converted to 'ne', 'le' and 'lt' forms. IfTrue/IfFalse get swapped as
// needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
assert(iff->in(0) != NULL, "If must be live");
assert(iff->in(0) != nullptr, "If must be live");
if (iff->outcnt() != 2) return NULL; // Malformed projections.
if (iff->outcnt() != 2) return nullptr; // Malformed projections.
Node* old_if_f = iff->proj_out(false);
Node* old_if_t = iff->proj_out(true);
// CountedLoopEnds want the back-control test to be TRUE, regardless of
// whether they are testing a 'gt' or 'lt' condition. The 'gt' condition
// happens in count-down loops
if (iff->is_BaseCountedLoopEnd()) return NULL;
if (!iff->in(1)->is_Bool()) return NULL; // Happens for partially optimized IF tests
if (iff->is_BaseCountedLoopEnd()) return nullptr;
if (!iff->in(1)->is_Bool()) return nullptr; // Happens for partially optimized IF tests
BoolNode *b = iff->in(1)->as_Bool();
BoolTest bt = b->_test;
// Test already in good order?
if( bt.is_canonical() )
return NULL;
return nullptr;
// Flip test to be canonical. Requires flipping the IfFalse/IfTrue and
// cloning the IfNode.
Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
if( !new_b->is_Bool() ) return NULL;
if( !new_b->is_Bool() ) return nullptr;
b = new_b->as_Bool();
PhaseIterGVN *igvn = phase->is_IterGVN();
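The effect of the canonicalization, sketched (illustrative):

// if (a >= b) { T } else { F }   // 'ge' is not a canonical test
// becomes, after negating the bool and swapping the two projections:
// if (a <  b) { F } else { T }   // same semantics, canonical 'lt' form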
@ -1845,7 +1845,7 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
if (dom->Opcode() == Op_RangeCheck && // Not same opcode?
prev_dom->in(0) == dom) { // One path of test does dominate?
if (dom == this) return NULL; // dead loop
if (dom == this) return nullptr; // dead loop
// See if this is a range check
Node* index2;
Node* range2;
@ -1882,18 +1882,18 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// ones. Since range checks "fail" by uncommon-trapping to the
// interpreter, widening a check can make us speculatively enter
// the interpreter. If we see range-check deopt's, do not widen!
if (!phase->C->allow_range_check_smearing()) return NULL;
if (!phase->C->allow_range_check_smearing()) return nullptr;
// Didn't find prior covering check, so cannot remove anything.
if (nb_checks == 0) {
return NULL;
return nullptr;
}
// Constant indices only need to check the upper bound.
// Non-constant indices must check both low and high.
int chk0 = (nb_checks - 1) % NRC;
if (index1) {
if (nb_checks == 1) {
return NULL;
return nullptr;
} else {
// If the top range check's constant is the min or max of
// all constants we widen the next one to cover the whole
@ -1914,7 +1914,7 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// accesses it protects to successfully read/write out of
// bounds.
if (nb_checks == 2) {
return NULL;
return nullptr;
}
int chk2 = (nb_checks - 3) % NRC;
RangeCheck rc2 = prev_checks[chk2];
@ -1961,8 +1961,8 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
} else {
prev_dom = search_identical(4);
if (prev_dom == NULL) {
return NULL;
if (prev_dom == nullptr) {
return nullptr;
}
}
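For context on the substitution itself: NULL is an integral constant macro, while nullptr has its own type (std::nullptr_t), so it cannot be silently taken as an integer. A standard C++ illustration (general language behavior, not code from this repository):

void f(int);
void f(char*);

f(NULL);     // resolves to f(int) or is ambiguous, depending on NULL's definition
f(nullptr);  // always calls f(char*)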


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,7 +93,7 @@ IndexSet::BitBlock *IndexSet::alloc_block() {
#endif
Compile *compile = Compile::current();
BitBlock* free_list = (BitBlock*)compile->indexSet_free_block_list();
if (free_list == NULL) {
if (free_list == nullptr) {
populate_free_list();
free_list = (BitBlock*)compile->indexSet_free_block_list();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -174,7 +174,7 @@ class IndexSet : public ResourceObj {
// from a new arena. It is essential that this method is called whenever
// the Arena being used for BitBlock allocation is reset.
static void reset_memory(Compile* compile, Arena *arena) {
compile->set_indexSet_free_block_list(NULL);
compile->set_indexSet_free_block_list(nullptr);
compile->set_indexSet_arena(arena);
// This should probably be done in a static initializer
@ -401,7 +401,7 @@ class IndexSetIterator {
// If the iterator was created from a non-const set, we replace
// non-canonical empty blocks with the _empty_block pointer. If
// _set is NULL, we do no replacement.
// _set is null, we do no replacement.
IndexSet *_set;
// Advance to the next non-empty word and return the next
@ -418,7 +418,7 @@ class IndexSetIterator {
_next_word(IndexSet::words_per_block),
_next_block(0),
_max_blocks(set->is_empty() ? 0 : set->_current_block_limit),
_words(NULL),
_words(nullptr),
_blocks(set->_blocks),
_set(set) {
#ifdef ASSERT
@ -435,9 +435,9 @@ class IndexSetIterator {
_next_word(IndexSet::words_per_block),
_next_block(0),
_max_blocks(set->is_empty() ? 0 : set->_current_block_limit),
_words(NULL),
_words(nullptr),
_blocks(set->_blocks),
_set(NULL)
_set(nullptr)
{
#ifdef ASSERT
if (CollectIndexSetStatistics) {


@ -44,7 +44,7 @@ uint StrIntrinsicNode::match_edge(uint idx) const {
Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (in(0) && in(0)->is_top()) return nullptr;
if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
@ -56,7 +56,7 @@ Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return this;
}
}
return NULL;
return nullptr;
}
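The same return convention recurs throughout these files; as an assumed summary of the Ideal() contract (sketch, SomeNode is hypothetical):

Node* SomeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;  // changed in place
  // return a freshly built replacement node if one applies, otherwise:
  return nullptr;  // no progress, so iterative GVN stops revisiting this node
}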
//------------------------------Value------------------------------------------
@ -72,7 +72,7 @@ uint StrIntrinsicNode::size_of() const { return sizeof(*this); }
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node* StrCompressedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//=============================================================================
@ -80,7 +80,7 @@ Node* StrCompressedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node* StrInflatedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
uint VectorizedHashCodeNode::match_edge(uint idx) const {
@ -89,7 +89,7 @@ uint VectorizedHashCodeNode::match_edge(uint idx) const {
}
Node* VectorizedHashCodeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
const Type* VectorizedHashCodeNode::Value(PhaseGVN* phase) const {
@ -109,7 +109,7 @@ uint EncodeISOArrayNode::match_edge(uint idx) const {
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node* EncodeISOArrayNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------Value------------------------------------------
@ -171,7 +171,7 @@ Node* CompressBitsNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return new AndLNode(compr, src->in(1));
}
}
return NULL;
return nullptr;
}
Node* compress_expand_identity(PhaseGVN* phase, Node* n) {
@ -227,7 +227,7 @@ Node* ExpandBitsNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return new AndLNode(src->in(1), mask);
}
}
return NULL;
return nullptr;
}
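Background for these two Ideal() routines, stated in terms of the semantics of Java's Long.compress/Long.expand (informal identities, not code from the patch):

// expand(compress(x, m), m) == x & m
// compress(expand(x, m), m) == x & ((1L << Long.bitCount(m)) - 1)
// which is why an expand/compress round trip can reduce to a plain AND.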
Node* ExpandBitsNode::Identity(PhaseGVN* phase) {


@ -41,13 +41,13 @@
// Optimization - Graph Style
// Check whether val is not-null-decoded compressed oop,
// i.e. will grab into the base of the heap if it represents NULL.
// i.e. will grab into the base of the heap if it represents null.
static bool accesses_heap_base_zone(Node *val) {
if (CompressedOops::base() != NULL) { // Implies UseCompressedOops.
if (CompressedOops::base() != nullptr) { // Implies UseCompressedOops.
if (val && val->is_Mach()) {
if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
// This assumes all Decodes with TypePtr::NotNull are matched to nodes that
// decode NULL to point to the heap base (Decode_NN).
// decode null to point to the heap base (Decode_NN).
if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
return true;
}
@ -78,8 +78,8 @@ static bool needs_explicit_null_check_for_read(Node *val) {
}
//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// Detect implicit-null-check opportunities. Basically, find null checks
// with suitable memory ops nearby. Use the memory op to do the null check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
@ -150,12 +150,12 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
bool is_decoden = ((intptr_t)val) & 1;
val = (Node*)(((intptr_t)val) & ~1);
assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
assert(!is_decoden || (val->in(0) == nullptr) && val->is_Mach() &&
(val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
// Search the successor block for a load or store whose base value is also
// the tested value. There may be several.
MachNode *best = NULL; // Best found so far
MachNode *best = nullptr; // Best found so far
for (DUIterator i = val->outs(); val->has_out(i); i++) {
Node *m = val->out(i);
if( !m->is_Mach() ) continue;
@ -224,7 +224,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
Node* base;
Node* index;
const MachOper* oper = mach->memory_inputs(base, index);
if (oper == NULL || oper == (MachOper*)-1) {
if (oper == nullptr || oper == (MachOper*)-1) {
continue; // Not a memory op; skip it
}
if (val == base ||
@ -247,7 +247,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Check that node's control edge is not-null block's head or dominates it,
// otherwise we can't hoist it because there are other control dependencies.
Node* ctrl = mach->in(0);
if (ctrl != NULL && !(ctrl == not_null_block->head() ||
if (ctrl != nullptr && !(ctrl == not_null_block->head() ||
get_block_for_node(ctrl)->dominates(not_null_block))) {
continue;
}
@ -255,9 +255,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// check if the offset is not too high for implicit exception
{
intptr_t offset = 0;
const TypePtr *adr_type = NULL; // Do not need this return value here
const TypePtr *adr_type = nullptr; // Do not need this return value here
const Node* base = mach->get_base_and_disp(offset, adr_type);
if (base == NULL || base == NodeSentinel) {
if (base == nullptr || base == NodeSentinel) {
// Narrow oop address doesn't have base, only index.
// Give up if offset is beyond page size or if heap base is not protected.
if (val->bottom_type()->isa_narrowoop() &&
@ -354,17 +354,17 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Make sure this memory op is not already being used for a NullCheck
Node *e = mb->end();
if( e->is_MachNullCheck() && e->in(1) == mach )
continue; // Already being used as a NULL check
continue; // Already being used as a null check
// Found a candidate! Pick one with least dom depth - the highest
// in the dom tree should be closest to the null check.
if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
if (best == nullptr || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
best = mach;
bidx = vidx;
}
}
// No candidate!
if (best == NULL) {
if (best == nullptr) {
return;
}
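The overall rewrite this pass performs, sketched as pseudocode (illustrative only):

// Before: an explicit test guards the access
//   if (p == nullptr) goto uncommon_trap;
//   x = p->field;
// After: the memory op itself is the test (MachNullCheck); a fault on the
// protected page near address zero is turned back into the uncommon trap
//   x = p->field;   // faulting load doubles as the null check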
@ -415,9 +415,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
map_node_to_block(best, block);
// Move the control dependence if it is pinned to not-null block.
// Don't change it in other cases: NULL or dominating control.
// Don't change it in other cases: null or dominating control.
Node* ctrl = best->in(0);
if (ctrl != NULL && get_block_for_node(ctrl) == not_null_block) {
if (ctrl != nullptr && get_block_for_node(ctrl) == not_null_block) {
// Set it to control edge of null check.
best->set_req(0, proj->in(0)->in(0));
}
@ -435,10 +435,10 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// proj==Op_True --> ne test; proj==Op_False --> eq test.
// One of two graph shapes got matched:
// (IfTrue (If (Bool NE (CmpP ptr NULL))))
// (IfFalse (If (Bool EQ (CmpP ptr NULL))))
// NULL checks are always branch-if-eq. If we see an IfTrue projection
// then we are replacing a 'ne' test with an 'eq' NULL check test.
// (IfTrue (If (Bool NE (CmpP ptr null))))
// (IfFalse (If (Bool EQ (CmpP ptr null))))
// Null checks are always branch-if-eq. If we see an IfTrue projection
// then we are replacing a 'ne' test with an 'eq' null check test.
// We need to flip the projections to keep the same semantics.
if( proj->Opcode() == Op_IfTrue ) {
// Swap order of projections in basic block to swap branch targets
@ -446,11 +446,11 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
Node *tmp2 = block->get_node(block->end_idx()+2);
block->map_node(tmp2, block->end_idx()+1);
block->map_node(tmp1, block->end_idx()+2);
Node *tmp = new Node(C->top()); // Use not NULL input
Node *tmp = new Node(C->top()); // Use not null input
tmp1->replace_by(tmp);
tmp2->replace_by(tmp1);
tmp->replace_by(tmp2);
tmp->destruct(NULL);
tmp->destruct(nullptr);
}
// Remove the existing null check; use a new implicit null check instead.
@ -466,7 +466,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Clean-up any dead code
for (uint i3 = 0; i3 < old_tst->req(); i3++) {
Node* in = old_tst->in(i3);
old_tst->set_req(i3, NULL);
old_tst->set_req(i3, nullptr);
if (in->outcnt() == 0) {
// Remove dead input node
in->disconnect_inputs(C);
@ -525,7 +525,7 @@ Node* PhaseCFG::select(
uint score = 0; // Bigger is better
int idx = -1; // Index in worklist
int cand_cnt = 0; // Candidate count
bool block_size_threshold_ok = (recalc_pressure_nodes != NULL) && (block->number_of_nodes() > 10);
bool block_size_threshold_ok = (recalc_pressure_nodes != nullptr) && (block->number_of_nodes() > 10);
for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
// Order in worklist is used to break ties.
@ -695,7 +695,7 @@ void PhaseCFG::adjust_register_pressure(Node* n, Block* block, intptr_t* recalc_
for (uint i = 1; i < n->req(); i++) {
bool lrg_ends = false;
Node *src_n = n->in(i);
if (src_n == NULL) continue;
if (src_n == nullptr) continue;
if (!src_n->is_Mach()) continue;
uint src = _regalloc->_lrg_map.find(src_n);
if (src == 0) continue;
@ -755,9 +755,9 @@ void PhaseCFG::adjust_register_pressure(Node* n, Block* block, intptr_t* recalc_
// if none, this live range ends and we can adjust register pressure
if (lrg_ends) {
if (finalize_mode) {
_regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure);
_regalloc->lower_pressure(block, 0, lrg_src, nullptr, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure);
} else {
_regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure);
_regalloc->lower_pressure(block, 0, lrg_src, nullptr, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure);
}
}
}
@ -805,7 +805,7 @@ void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
// carry lots of stuff live across a call.
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
// Find the next control-defining Node in this block
Node* call = NULL;
Node* call = nullptr;
for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
Node* m = this_call->fast_out(i);
if (get_block_for_node(m) == block && // Local-block user
@ -815,7 +815,7 @@ void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& ne
break;
}
}
if (call == NULL) return; // No next call (e.g., block end is near)
if (call == nullptr) return; // No next call (e.g., block end is near)
// Set next-call for all inputs to this call
set_next_call(block, call, next_call);
}
@ -886,7 +886,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
block->insert_node(proj, node_cnt++);
// Select the right register save policy.
const char *save_policy = NULL;
const char *save_policy = nullptr;
switch (op) {
case Op_CallRuntime:
case Op_CallLeaf:
@ -956,7 +956,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
return true;
}
bool block_size_threshold_ok = (recalc_pressure_nodes != NULL) && (block->number_of_nodes() > 10);
bool block_size_threshold_ok = (recalc_pressure_nodes != nullptr) && (block->number_of_nodes() > 10);
// We track the uses of local definitions as input dependences so that
// we know when a given instruction is available to be scheduled.
@ -1002,7 +1002,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
// Check the precedence edges
for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec);
if (oop_store != NULL) {
if (oop_store != nullptr) {
assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
}
}
@ -1022,7 +1022,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
// and the edge will be lost. This is why this code should be
// executed only when Precedent (== TypeFunc::Parms) edge is present.
Node *x = n->in(TypeFunc::Parms);
if (x != NULL && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) {
if (x != nullptr && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) {
// Old edge to node within same block will get removed, but no precedence
// edge will get added because it already exists. Update ready count.
int cnt = ready_cnt.at(n->_idx);
@ -1259,7 +1259,7 @@ Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *
use_blk = use_blk->_idom;
// Find the successor
Node *fixup = NULL;
Node *fixup = nullptr;
uint j;
for( j = 0; j < def_blk->_num_succs; j++ )
@ -1284,14 +1284,14 @@ Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *
for (uint k = 1; k < use_blk->num_preds(); k++) {
if (phi->in(k) != inputs[k]) {
// Not a match
fixup = NULL;
fixup = nullptr;
break;
}
}
}
// If an existing PhiNode was not found, make a new one.
if (fixup == NULL) {
if (fixup == nullptr) {
Node *new_phi = PhiNode::make(use_blk->head(), def);
use_blk->insert_node(new_phi, 1);
map_node_to_block(new_phi, use_blk);

File diff suppressed because it is too large.


@ -75,7 +75,7 @@ class LibraryCallKit : public GraphKit {
LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
: GraphKit(jvms),
_intrinsic(intrinsic),
_result(NULL)
_result(nullptr)
{
// Check if this is a root compile. In that case we don't have a caller.
if (!jvms->has_method()) {
@ -85,7 +85,7 @@ class LibraryCallKit : public GraphKit {
// and save the stack pointer value so it can be used by uncommon_trap.
// We find the argument count by looking at the declared signature.
bool ignored_will_link;
ciSignature* declared_signature = NULL;
ciSignature* declared_signature = nullptr;
ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
_reexecute_sp = sp() + nargs; // "push" arguments back on stack
@ -105,7 +105,7 @@ class LibraryCallKit : public GraphKit {
void push_result() {
// Push the result onto the stack.
if (!stopped() && result() != NULL) {
if (!stopped() && result() != nullptr) {
BasicType bt = result()->bottom_type()->basic_type();
push_node(bt, result());
}
@ -116,7 +116,7 @@ class LibraryCallKit : public GraphKit {
fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
}
void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
void set_result(Node* n) { assert(_result == nullptr, "only set once"); _result = n; }
void set_result(RegionNode* region, PhiNode* value);
Node* result() { return _result; }
@ -128,7 +128,7 @@ class LibraryCallKit : public GraphKit {
Node* generate_fair_guard(Node* test, RegionNode* region);
Node* generate_negative_guard(Node* index, RegionNode* region,
// resulting CastII of index:
Node* *pos_index = NULL);
Node* *pos_index = nullptr);
Node* generate_limit_guard(Node* offset, Node* subseq_length,
Node* array_length,
RegionNode* region);
@ -184,8 +184,8 @@ class LibraryCallKit : public GraphKit {
CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
return generate_method_call(method_id, true, false);
}
Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators = IN_HEAP, bool is_static = false, ciInstanceKlass* fromKls = NULL);
Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact = true, bool is_static = false, ciInstanceKlass* fromKls = NULL);
Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators = IN_HEAP, bool is_static = false, ciInstanceKlass* fromKls = nullptr);
Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact = true, bool is_static = false, ciInstanceKlass* fromKls = nullptr);
Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ void PhaseLive::compute(uint maxlrg) {
_deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
memset(_deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());
_free_IndexSet = NULL;
_free_IndexSet = nullptr;
// Blocks having done pass-1
VectorSet first_pass;
@ -176,7 +176,7 @@ void PhaseLive::compute(uint maxlrg) {
}
}
IndexSet *free = _free_IndexSet;
while (free != NULL) {
while (free != nullptr) {
IndexSet *temp = free;
free = free->next();
temp->clear();
@ -223,7 +223,7 @@ void PhaseLive::freeset(Block *p) {
}
f->set_next(_free_IndexSet);
_free_IndexSet = f; // Drop onto free list
_deltas[p->_pre_order-1] = NULL;
_deltas[p->_pre_order-1] = nullptr;
}
// Add a live-out value to a given block's live-out set. If it is new, then


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ public:
// Compute liveness info
void compute(uint maxlrg);
// Reset arena storage
void reset() { _live = NULL; }
void reset() { _live = nullptr; }
// Return the live-out set for this block
IndexSet *live( const Block * b ) { return &_live[b->_pre_order-1]; }


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ OptoReg::Name BoxLockNode::reg(Node* box) {
// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
LockNode* lock = NULL;
LockNode* lock = nullptr;
bool has_one_lock = false;
for (uint i = 0; i < this->outcnt(); i++) {
Node* n = this->raw_out(i);
@ -96,19 +96,19 @@ bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node*
// Check lock's box since box could be referenced by Lock's debug info.
if (alock->box_node() == this) {
if (alock->obj_node()->eqv_uncast(obj)) {
if ((unique_lock != NULL) && alock->is_Lock()) {
if (lock == NULL) {
if ((unique_lock != nullptr) && alock->is_Lock()) {
if (lock == nullptr) {
lock = alock->as_Lock();
has_one_lock = true;
} else if (lock != alock->as_Lock()) {
has_one_lock = false;
if (bad_lock != NULL) {
if (bad_lock != nullptr) {
*bad_lock = alock;
}
}
}
} else {
if (bad_lock != NULL) {
if (bad_lock != nullptr) {
*bad_lock = alock;
}
return false; // Different objects
@ -132,7 +132,7 @@ bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node*
// unlocks are reference only this one object.
}
#endif
if (unique_lock != NULL && has_one_lock) {
if (unique_lock != nullptr && has_one_lock) {
*unique_lock = lock;
}
return true;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,8 +80,8 @@ public:
FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
init_req(0,ctrl);
init_class_id(Class_FastLock);
_rtm_counters = NULL;
_stack_rtm_counters = NULL;
_rtm_counters = nullptr;
_stack_rtm_counters = nullptr;
}
Node* obj_node() const { return in(1); }
Node* box_node() const { return in(2); }


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred,
}
set_loop(n, loop);
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
if (_idom != nullptr) {
set_idom(n, pred, dom_depth(pred));
}
}
@ -132,7 +132,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
register_control(rgn, loop, uncommon_proj);
_igvn.replace_input_of(call, 0, rgn);
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
if (_idom != nullptr) {
set_idom(call, rgn, dom_depth(rgn));
}
// Move nodes pinned on the projection or whose control is set to
@ -146,13 +146,13 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
}
Node* entry = iff->in(0);
if (new_entry != NULL) {
if (new_entry != nullptr) {
// Cloning the predicate to a new location.
entry = new_entry;
}
// Create new_iff
IdealLoopTree* lp = get_loop(entry);
IfNode* new_iff = NULL;
IfNode* new_iff = nullptr;
if (opcode == Op_If) {
new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
} else {
@ -180,7 +180,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
_igvn.add_input_to(rgn, if_uct);
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
if (_idom != nullptr) {
Node* ridom = idom(rgn);
Node* nrdom = dom_lca_internal(ridom, new_iff);
set_idom(rgn, nrdom, dom_depth(rgn));
@ -216,10 +216,10 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
}
assert(!has_phi || rgn->req() > 3, "no phis when region is created");
if (new_entry == NULL) {
if (new_entry == nullptr) {
// Attach if_cont to iff
_igvn.replace_input_of(iff, 0, if_cont);
if (_idom != NULL) {
if (_idom != nullptr) {
set_idom(iff, if_cont, dom_depth(iff));
}
}
@ -263,7 +263,7 @@ Node* PhaseIdealLoop::clone_nodes_with_same_ctrl(Node* node, ProjNode* old_ctrl,
Dict old_new_mapping = clone_nodes(nodes_with_same_ctrl); // Cloned but not rewired, yet
rewire_cloned_nodes_to_ctrl(old_ctrl, new_ctrl, nodes_with_same_ctrl, old_new_mapping);
Node* clone_phi_input = static_cast<Node*>(old_new_mapping[node]);
assert(clone_phi_input != NULL && clone_phi_input->_idx >= last_idx, "must exist and be a proper clone");
assert(clone_phi_input != nullptr && clone_phi_input->_idx >= last_idx, "must exist and be a proper clone");
return clone_phi_input;
}
@ -304,7 +304,7 @@ void PhaseIdealLoop::rewire_inputs_of_clones_to_clones(Node* new_ctrl, Node* clo
if (!in->is_Phi()) {
assert(!in->is_CFG(), "must be data node");
Node* in_clone = static_cast<Node*>(old_new_mapping[in]);
if (in_clone != NULL) {
if (in_clone != nullptr) {
_igvn.replace_input_of(clone, i, in_clone);
set_ctrl(clone, new_ctrl);
}
@ -390,7 +390,7 @@ void PhaseIdealLoop::get_skeleton_predicates(Node* predicate, Unique_Node_List&
assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
assert(iff->in(1)->in(1)->Opcode() == Op_Opaque1, "unexpected predicate shape");
predicate = iff->in(0);
while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) {
while (predicate != nullptr && predicate->is_Proj() && predicate->in(0)->is_If()) {
iff = predicate->in(0)->as_If();
uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
if (uncommon_proj->unique_ctrl_out() != rgn) {
@ -415,8 +415,8 @@ void PhaseIdealLoop::get_skeleton_predicates(Node* predicate, Unique_Node_List&
ProjNode* PhaseIdealLoop::clone_skeleton_predicate_for_unswitched_loops(Node* iff, ProjNode* predicate,
Deoptimization::DeoptReason reason,
ProjNode* output_proj) {
Node* bol = clone_skeleton_predicate_bool(iff, NULL, NULL, output_proj);
ProjNode* proj = create_new_if_for_predicate(output_proj, NULL, reason, iff->Opcode(),
Node* bol = clone_skeleton_predicate_bool(iff, nullptr, nullptr, output_proj);
ProjNode* proj = create_new_if_for_predicate(output_proj, nullptr, reason, iff->Opcode(),
false, predicate->is_IfTrue());
_igvn.replace_input_of(proj->in(0), 1, bol);
_igvn.replace_input_of(output_proj->in(0), 0, proj);
@ -432,23 +432,23 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
// Search original predicates
ProjNode* limit_check_proj = NULL;
ProjNode* limit_check_proj = nullptr;
limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (limit_check_proj != NULL) {
if (limit_check_proj != nullptr) {
entry = skip_loop_predicates(entry);
}
ProjNode* profile_predicate_proj = NULL;
ProjNode* predicate_proj = NULL;
ProjNode* profile_predicate_proj = nullptr;
ProjNode* predicate_proj = nullptr;
if (UseProfiledLoopPredicate) {
profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
if (profile_predicate_proj != NULL) {
if (profile_predicate_proj != nullptr) {
entry = skip_loop_predicates(entry);
}
}
if (UseLoopPredicate) {
predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
}
if (predicate_proj != NULL) { // right pattern that can be used by loop predication
if (predicate_proj != nullptr) { // right pattern that can be used by loop predication
// clone predicate
iffast_pred = clone_predicate_to_unswitched_loop(predicate_proj, iffast_pred, Deoptimization::Reason_predicate,false);
ifslow_pred = clone_predicate_to_unswitched_loop(predicate_proj, ifslow_pred, Deoptimization::Reason_predicate,true);
@ -457,7 +457,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No
check_created_predicate_for_unswitching(iffast_pred);
check_created_predicate_for_unswitching(ifslow_pred);
}
if (profile_predicate_proj != NULL) { // right pattern that can be used by loop predication
if (profile_predicate_proj != nullptr) { // right pattern that can be used by loop predication
// clone predicate
iffast_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, iffast_pred,Deoptimization::Reason_profile_predicate, false);
ifslow_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, ifslow_pred,Deoptimization::Reason_profile_predicate, true);
@ -466,7 +466,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No
check_created_predicate_for_unswitching(iffast_pred);
check_created_predicate_for_unswitching(ifslow_pred);
}
if (limit_check_proj != NULL && clone_limit_check) {
if (limit_check_proj != nullptr && clone_limit_check) {
// Clone loop limit check last to insert it before loop.
// Don't clone a limit check which was already finalized
// for this counted loop (only one limit check is needed).
@ -480,7 +480,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No
#ifndef PRODUCT
void PhaseIdealLoop::check_created_predicate_for_unswitching(const Node* new_entry) {
assert(new_entry != NULL, "IfTrue or IfFalse after clone predicate");
assert(new_entry != nullptr, "IfTrue or IfFalse after clone predicate");
if (TraceLoopPredicate) {
tty->print("Loop Predicate cloned: ");
debug_only(new_entry->in(0)->dump(););
@ -497,7 +497,7 @@ Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
Node* rgn = uncommon_proj->unique_ctrl_out();
assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
entry = entry->in(0)->in(0);
while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
while (entry != nullptr && entry->is_Proj() && entry->in(0)->is_If()) {
uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con);
if (uncommon_proj->unique_ctrl_out() != rgn)
break;
@ -531,12 +531,12 @@ ProjNode* PhaseIdealLoop::next_predicate(ProjNode* predicate) {
//--------------------------find_predicate_insertion_point-------------------
// Find a good location to insert a predicate
ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
if (start_c == NULL || !start_c->is_Proj())
return NULL;
if (start_c == nullptr || !start_c->is_Proj())
return nullptr;
if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
return start_c->as_Proj();
}
return NULL;
return nullptr;
}
//--------------------------Predicates::Predicates--------------------------
@ -564,24 +564,24 @@ PhaseIdealLoop::Predicates::Predicates(Node* entry) {
//--------------------------find_predicate------------------------------------
// Find a predicate
Node* PhaseIdealLoop::find_predicate(Node* entry) {
Node* predicate = NULL;
Node* predicate = nullptr;
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL) { // right pattern that can be used by loop predication
if (predicate != nullptr) { // right pattern that can be used by loop predication
return entry;
}
if (UseLoopPredicate) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate != NULL) { // right pattern that can be used by loop predication
if (predicate != nullptr) { // right pattern that can be used by loop predication
return entry;
}
}
if (UseProfiledLoopPredicate) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
if (predicate != NULL) { // right pattern that can be used by loop predication
if (predicate != nullptr) { // right pattern that can be used by loop predication
return entry;
}
}
return NULL;
return nullptr;
}
//------------------------------Invariance-----------------------------------
@ -594,7 +594,7 @@ class Invariance : public StackObj {
Node_List _old_new; // map of old to new (clone)
IdealLoopTree* _lpt;
PhaseIdealLoop* _phase;
Node* _data_dependency_on; // The projection into the loop on which data nodes are dependent or NULL otherwise
Node* _data_dependency_on; // The projection into the loop on which data nodes are dependent or null otherwise
// Helper function to set up the invariance for invariance computation
// If n is a known invariant, set up directly. Otherwise, look up the
@ -606,7 +606,7 @@ class Invariance : public StackObj {
Node *n_ctrl = _phase->ctrl_or_self(n);
Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
if (_phase->is_dominator(n_ctrl, u_ctrl)) {
_stack.push(n, n->in(0) == NULL ? 1 : 0);
_stack.push(n, n->in(0) == nullptr ? 1 : 0);
}
}
}
@ -625,7 +625,7 @@ class Invariance : public StackObj {
bool all_inputs_invariant = true;
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in == NULL) continue;
if (in == nullptr) continue;
assert(_visited.test(in->_idx), "must have visited input");
if (!_invariant.test(in->_idx)) { // bad guy
all_inputs_invariant = false;
@ -637,14 +637,14 @@ class Invariance : public StackObj {
// loop, it was marked invariant but n is only invariant if
// it depends only on that test. Otherwise, unless that test
// is out of the loop, it's not invariant.
if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == nullptr || !_phase->is_member(_lpt, n->in(0))) {
_invariant.set(n->_idx); // I am an invariant too
}
}
} else { // process next input
_stack.set_index(idx + 1);
Node* m = n->in(idx);
if (m != NULL && !_visited.test_set(m->_idx)) {
if (m != nullptr && !_visited.test_set(m->_idx)) {
visit(n, m);
}
}
@ -660,7 +660,7 @@ class Invariance : public StackObj {
_old_new.map(n->_idx, n);
} else { // to be cloned
assert(!n->is_CFG(), "should not see CFG here");
_stack.push(n, n->in(0) == NULL ? 1 : 0);
_stack.push(n, n->in(0) == nullptr ? 1 : 0);
}
}
@ -678,13 +678,13 @@ class Invariance : public StackObj {
_phase->register_new_node(n_cl, ctrl);
for (uint i = 0; i < n->req(); i++) {
Node* in = n_cl->in(i);
if (in == NULL) continue;
if (in == nullptr) continue;
n_cl->set_req(i, _old_new[in->_idx]);
}
} else { // process next input
_stack.set_index(idx + 1);
Node* m = n->in(idx);
if (m != NULL && !_clone_visited.test_set(m->_idx)) {
if (m != nullptr && !_clone_visited.test_set(m->_idx)) {
clone_visit(m); // visit the input
}
}
@ -697,7 +697,7 @@ class Invariance : public StackObj {
_stack(area, 10 /* guess */),
_clone_visited(area), _old_new(area),
_lpt(lpt), _phase(lpt->_phase),
_data_dependency_on(NULL)
_data_dependency_on(nullptr)
{
LoopNode* head = _lpt->_head->as_Loop();
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
@ -730,7 +730,7 @@ class Invariance : public StackObj {
}
// Did we explicitly mark some nodes non-loop-invariant? If so, return the entry node on which some data nodes
// are dependent that prevent loop predication. Otherwise, return NULL.
// are dependent that prevent loop predication. Otherwise, return null.
Node* data_dependency_on() {
return _data_dependency_on;
}
@ -786,7 +786,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT
range = cmp->in(2);
if (range->Opcode() != Op_LoadRange) {
const TypeInteger* tinteger = phase->_igvn.type(range)->isa_integer(bt);
if (tinteger == NULL || tinteger->empty() || tinteger->lo_as_long() < 0) {
if (tinteger == nullptr || tinteger->empty() || tinteger->lo_as_long() < 0) {
// Allow predication on positive values that aren't LoadRanges.
// This allows optimization of loops where the length of the
// array is a known value and doesn't need to be loaded back
@ -797,7 +797,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT
assert(bt == T_INT, "no LoadRange for longs");
}
scale = 0;
offset = NULL;
offset = nullptr;
if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, bt, &scale, &offset)) {
return false;
}
@ -805,8 +805,8 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT
}
bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode *predicate_proj)) const {
Node* range = NULL;
Node* offset = NULL;
Node* range = nullptr;
Node* offset = nullptr;
jlong scale = 0;
Node* iv = _head->as_BaseCountedLoop()->phi();
Compile* C = Compile::current();
@ -817,12 +817,12 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari
if (!invar.is_invariant(range)) {
return false;
}
if (offset != NULL) {
if (offset != nullptr) {
if (!invar.is_invariant(offset)) { // offset must be invariant
return false;
}
Node* data_dependency_on = invar.data_dependency_on();
if (data_dependency_on != NULL && old_unique_idx < C->unique()) {
if (data_dependency_on != nullptr && old_unique_idx < C->unique()) {
// 'offset' node was newly created in is_range_check_if(). Check that it does not depend on the entry projection
// into the loop. If it does, we cannot perform loop predication (see Invariance::Invariance()).
assert(!offset->is_CFG(), "offset must be a data node");
@ -868,21 +868,21 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
int scale, Node* offset,
Node* init, Node* limit, jint stride,
Node* range, bool upper, bool &overflow, bool negate) {
jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0;
jint con_limit = (limit != nullptr && limit->is_Con()) ? limit->get_int() : 0;
jint con_init = init->is_Con() ? init->get_int() : 0;
jint con_offset = offset->is_Con() ? offset->get_int() : 0;
stringStream* predString = NULL;
stringStream* predString = nullptr;
if (TraceLoopPredicate) {
predString = new (mtCompiler) stringStream();
predString->print("rc_predicate ");
}
overflow = false;
Node* max_idx_expr = NULL;
Node* max_idx_expr = nullptr;
const TypeInt* idx_type = TypeInt::INT;
if ((stride > 0) == (scale > 0) == upper) {
guarantee(limit != NULL, "sanity");
guarantee(limit != nullptr, "sanity");
if (TraceLoopPredicate) {
if (limit->is_Con()) {
predString->print("(%d ", con_limit);
@ -983,7 +983,7 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
register_new_node(max_idx_expr, ctrl);
}
CmpNode* cmp = NULL;
CmpNode* cmp = nullptr;
if (overflow) {
// Integer expressions may overflow, do long comparison
range = new ConvI2LNode(range);
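What rc_predicate builds, informally (sketch; scale, offset, init, limit, stride and range are the parameters above):

// In-loop check, executed every iteration for i = init, init+stride, ...:
//   0 <= scale*i + offset < range
// Hoisted form: two loop-invariant predicates over the extreme values of i,
// e.g. for stride > 0 and scale > 0:
//   lower bound: scale*init + offset >= 0
//   upper bound: scale*(limit - stride) + offset < range
// with 'overflow' forcing the comparison into long arithmetic, as above.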
@ -1011,7 +1011,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop
return false;
}
if (predicate_proj == NULL) {
if (predicate_proj == nullptr) {
return false;
}
@ -1019,15 +1019,15 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop
bool follow_branches = true;
IdealLoopTree* l = loop->_child;
// For leaf loops and loops with a single inner loop
while (l != NULL && follow_branches) {
while (l != nullptr && follow_branches) {
IdealLoopTree* child = l;
if (child->_child != NULL &&
if (child->_child != nullptr &&
child->_head->is_OuterStripMinedLoop()) {
assert(child->_child->_next == NULL, "only one inner loop for strip mined loop");
assert(child->_child->_next == nullptr, "only one inner loop for strip mined loop");
assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined");
child = child->_child;
}
if (child->_child != NULL || child->_irreducible) {
if (child->_child != nullptr || child->_irreducible) {
follow_branches = false;
}
l = l->_next;
@ -1040,7 +1040,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop
loop_trip_cnt = head->profile_trip_cnt();
if (head->is_CountedLoop()) {
CountedLoopNode* cl = head->as_CountedLoop();
if (cl->phi() != NULL) {
if (cl->phi() != nullptr) {
const TypeInt* t = _igvn.type(cl->phi())->is_int();
float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con());
if (worst_case_trip_cnt < loop_trip_cnt) {
@ -1202,7 +1202,7 @@ float PathFrequency::to(Node* n) {
assert(con >= CatchProjNode::catch_all_index, "what else?");
_freqs.at_put_grow(c->_idx, 0, -1);
}
} else if (c->unique_ctrl_out_or_null() == NULL && !c->is_If() && !c->is_Jump()) {
} else if (c->unique_ctrl_out_or_null() == nullptr && !c->is_If() && !c->is_Jump()) {
ShouldNotReachHere();
} else {
c = c->in(0);
@ -1261,7 +1261,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
CountedLoopNode *cl, ConNode* zero, Invariance& invar,
Deoptimization::DeoptReason reason) {
// Following are changed to nonnull when a predicate can be hoisted
ProjNode* new_predicate_proj = NULL;
ProjNode* new_predicate_proj = nullptr;
IfNode* iff = proj->in(0)->as_If();
Node* test = iff->in(1);
if (!test->is_Bool()){ //Conv2B, ...
@ -1270,7 +1270,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
BoolNode* bol = test->as_Bool();
if (invar.is_invariant(bol)) {
// Invariant test
new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
new_predicate_proj = create_new_if_for_predicate(predicate_proj, nullptr,
reason,
iff->Opcode());
Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
@ -1295,7 +1295,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
loop->dump_head();
}
#endif
} else if (cl != NULL && loop->is_range_check_if(iff, this, invar DEBUG_ONLY(COMMA predicate_proj))) {
} else if (cl != nullptr && loop->is_range_check_if(iff, this, invar DEBUG_ONLY(COMMA predicate_proj))) {
// Range check for counted loops
const Node* cmp = bol->in(1)->as_Cmp();
Node* idx = cmp->in(1);
@ -1336,7 +1336,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
// Test the lower bound
BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false, overflow, negate);
ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode());
IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
_igvn.hash_delete(lower_bound_iff);
lower_bound_iff->set_req(1, lower_bound_bol);
@ -1345,7 +1345,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
// Test the upper bound
BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow, negate);
ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode());
assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
_igvn.hash_delete(upper_bound_iff);
@ -1369,7 +1369,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode*
// with uncommon trap.
return false;
}
assert(new_predicate_proj != NULL, "sanity");
assert(new_predicate_proj != nullptr, "sanity");
// Success - attach condition (new_predicate_bol) to predicate if
invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
@ -1401,7 +1401,7 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo
Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
C->add_skeleton_predicate_opaq(opaque_bol);
register_new_node(opaque_bol, upper_bound_proj);
ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode());
_igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
assert(opaque_init->outcnt() > 0, "should be used");
@ -1423,7 +1423,7 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo
opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
C->add_skeleton_predicate_opaq(opaque_bol);
register_new_node(opaque_bol, new_proj);
new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
new_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode());
_igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
assert(max_value->outcnt() > 0, "should be used");
assert(skeleton_predicate_has_opaque(new_proj->in(0)->as_If()), "unexpected");
@ -1451,7 +1451,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
return false;
}
CountedLoopNode *cl = NULL;
CountedLoopNode *cl = nullptr;
if (head->is_valid_counted_loop(T_INT)) {
cl = head->as_CountedLoop();
// do nothing for iteration-split loops
@ -1459,21 +1459,21 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// Avoid RCE if Counted loop's test is '!='.
BoolTest::mask bt = cl->loopexit()->test_trip();
if (bt != BoolTest::lt && bt != BoolTest::gt)
cl = NULL;
cl = nullptr;
}
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
ProjNode *loop_limit_proj = NULL;
ProjNode *predicate_proj = NULL;
ProjNode *profile_predicate_proj = NULL;
ProjNode *loop_limit_proj = nullptr;
ProjNode *predicate_proj = nullptr;
ProjNode *profile_predicate_proj = nullptr;
// Loop limit check predicate should be near the loop.
loop_limit_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (loop_limit_proj != NULL) {
if (loop_limit_proj != nullptr) {
entry = skip_loop_predicates(loop_limit_proj);
}
bool has_profile_predicates = false;
profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
if (profile_predicate_proj != NULL) {
if (profile_predicate_proj != nullptr) {
Node* n = skip_loop_predicates(entry);
// Check if predicates were already added to the profile predicate
// block
@ -1488,7 +1488,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
bool follow_branches = loop_predication_should_follow_branches(loop, profile_predicate_proj, loop_trip_cnt);
assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?");
if (predicate_proj == NULL && !follow_branches) {
if (predicate_proj == nullptr && !follow_branches) {
#ifndef PRODUCT
if (TraceLoopPredicate) {
tty->print("missing predicate:");
@ -1537,7 +1537,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
IfNode* iff = proj->in(0)->as_If();
CallStaticJavaNode* call = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
if (call == NULL) {
if (call == nullptr) {
if (loop->is_loop_exit(iff)) {
// stop processing the remaining projs in the list because the execution of them
// depends on the condition of "iff" (iff->in(1)).
@ -1558,7 +1558,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
break;
}
if (predicate_proj != NULL) {
if (predicate_proj != nullptr) {
hoisted = loop_predication_impl_helper(loop, proj, predicate_proj, cl, zero, invar, Deoptimization::Reason_predicate) | hoisted;
}
} // end while
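
The net effect of the pass above, as a minimal C++ sketch (function names and the trap mechanism are illustrative, not the compiler's API): an invariant range check inside the loop is replaced by one guard hoisted in front of it, and the guarded body runs test-free.

    #include <cassert>

    // Before predication: the range check executes on every iteration.
    int sum_checked(const int* a, int a_len, int n) {
      int s = 0;
      for (int i = 0; i < n; i++) {
        assert(0 <= i && i < a_len && "range check");   // per-iteration test
        s += a[i];
      }
      return s;
    }

    // After predication: one hoisted predicate covers the whole range.
    // A failing predicate would deoptimize; an assert models that here.
    int sum_predicated(const int* a, int a_len, int n) {
      assert((n <= 0 || n - 1 < a_len) && "hoisted predicate"); // covers i = 0..n-1
      int s = 0;
      for (int i = 0; i < n; i++) {
        s += a[i];                                      // no check in the body
      }
      return s;
    }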


@ -44,10 +44,10 @@
#include "runtime/stubRoutines.hpp"
//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// Given an IfNode, return the loop-exiting projection or null if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
if (iff->outcnt() != 2) return NULL; // Ignore partially dead tests
if (iff->outcnt() != 2) return nullptr; // Ignore partially dead tests
PhaseIdealLoop *phase = _phase;
// Test is an IfNode, has 2 projections. If BOTH are in the loop
// we need loop unswitching instead of peeling.
@ -55,7 +55,7 @@ Node *IdealLoopTree::is_loop_exit(Node *iff) const {
return iff->raw_out(0);
if (!is_member(phase->get_loop(iff->raw_out(1))))
return iff->raw_out(1);
return NULL;
return nullptr;
}
@ -73,19 +73,19 @@ void IdealLoopTree::record_for_igvn() {
if (_head->is_CountedLoop() && _head->as_Loop()->is_strip_mined()) {
CountedLoopNode* l = _head->as_CountedLoop();
Node* outer_loop = l->outer_loop();
assert(outer_loop != NULL, "missing piece of strip mined loop");
assert(outer_loop != nullptr, "missing piece of strip mined loop");
_phase->_igvn._worklist.push(outer_loop);
Node* outer_loop_tail = l->outer_loop_tail();
assert(outer_loop_tail != NULL, "missing piece of strip mined loop");
assert(outer_loop_tail != nullptr, "missing piece of strip mined loop");
_phase->_igvn._worklist.push(outer_loop_tail);
Node* outer_loop_end = l->outer_loop_end();
assert(outer_loop_end != NULL, "missing piece of strip mined loop");
assert(outer_loop_end != nullptr, "missing piece of strip mined loop");
_phase->_igvn._worklist.push(outer_loop_end);
Node* outer_safepoint = l->outer_safepoint();
assert(outer_safepoint != NULL, "missing piece of strip mined loop");
assert(outer_safepoint != nullptr, "missing piece of strip mined loop");
_phase->_igvn._worklist.push(outer_safepoint);
Node* cle_out = _head->as_CountedLoop()->loopexit()->proj_out(false);
assert(cle_out != NULL, "missing piece of strip mined loop");
assert(cle_out != nullptr, "missing piece of strip mined loop");
_phase->_igvn._worklist.push(cle_out);
}
}
@ -115,7 +115,7 @@ void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) {
Node* init_n = cl->init_trip();
Node* limit_n = cl->limit();
if (init_n != NULL && limit_n != NULL) {
if (init_n != nullptr && limit_n != nullptr) {
// Use longs to avoid integer overflow.
int stride_con = cl->stride_con();
const TypeInt* init_type = phase->_igvn.type(init_n)->is_int();
@ -208,7 +208,7 @@ void IdealLoopTree::compute_profile_trip_cnt(PhaseIdealLoop *phase) {
// Now compute a loop exit count
float loop_exit_cnt = 0.0f;
if (_child == NULL) {
if (_child == nullptr) {
for (uint i = 0; i < _body.size(); i++) {
Node *n = _body[i];
loop_exit_cnt += compute_profile_trip_cnt_helper(n);
@ -264,10 +264,10 @@ int IdealLoopTree::find_invariant(Node* n, PhaseIdealLoop *phase) {
//---------------------is_associative-----------------------------
// Return TRUE if "n" is an associative binary node. If "base" is
// not NULL, "n" must be re-associative with it.
// not null, "n" must be re-associative with it.
bool IdealLoopTree::is_associative(Node* n, Node* base) {
int op = n->Opcode();
if (base != NULL) {
if (base != nullptr) {
assert(is_associative(base), "Base node should be associative");
int base_op = base->Opcode();
if (base_op == Op_AddI || base_op == Op_SubI) {
@ -319,7 +319,7 @@ Node* IdealLoopTree::reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, P
neg_inv2 = !neg_inv2;
}
bool is_int = n1->bottom_type()->isa_int() != NULL;
bool is_int = n1->bottom_type()->isa_int() != nullptr;
Node* inv1_c = phase->get_ctrl(inv1);
Node* n_inv1;
if (neg_inv1) {
@ -375,21 +375,21 @@ Node* IdealLoopTree::reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, P
// inv1 op (x op inv2) => (inv1 op inv2) op x
//
Node* IdealLoopTree::reassociate(Node* n1, PhaseIdealLoop *phase) {
if (!is_associative(n1) || n1->outcnt() == 0) return NULL;
if (is_invariant(n1)) return NULL;
if (!is_associative(n1) || n1->outcnt() == 0) return nullptr;
if (is_invariant(n1)) return nullptr;
// Don't mess with add of constant (igvn moves them to expression tree root.)
if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
if (n1->is_Add() && n1->in(2)->is_Con()) return nullptr;
int inv1_idx = find_invariant(n1, phase);
if (!inv1_idx) return NULL;
if (!inv1_idx) return nullptr;
Node* n2 = n1->in(3 - inv1_idx);
if (!is_associative(n2, n1)) return NULL;
if (!is_associative(n2, n1)) return nullptr;
int inv2_idx = find_invariant(n2, phase);
if (!inv2_idx) return NULL;
if (!inv2_idx) return nullptr;
if (!phase->may_require_nodes(10, 10)) return NULL;
if (!phase->may_require_nodes(10, 10)) return nullptr;
Node* result = NULL;
Node* result = nullptr;
switch (n1->Opcode()) {
case Op_AddI:
case Op_AddL:
@ -417,7 +417,7 @@ Node* IdealLoopTree::reassociate(Node* n1, PhaseIdealLoop *phase) {
ShouldNotReachHere();
}
assert(result != NULL, "");
assert(result != nullptr, "");
phase->register_new_node(result, phase->get_ctrl(n1));
phase->_igvn.replace_node(n1, result);
assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
@ -432,7 +432,7 @@ void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
Node *n = _body.at(i);
for (int j = 0; j < 5; j++) {
Node* nn = reassociate(n, phase);
if (nn == NULL) break;
if (nn == nullptr) break;
n = nn; // again
}
}
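
A minimal sketch of what the reassociation above buys (plain C++, hypothetical names): rewriting inv1 op (x op inv2) as (inv1 op inv2) op x groups the two invariants together, so their combination can be hoisted and the loop body shrinks to a single operation. IR integer adds wrap, so the regrouping is always legal there.

    #include <cstddef>

    void add_biases(int* a, size_t n, int inv1, int inv2) {
      // Before: a[i] = inv1 + (a[i] + inv2);  -- two adds per iteration
      int inv = inv1 + inv2;            // (inv1 op inv2), loop-invariant
      for (size_t i = 0; i < n; i++) {
        a[i] = a[i] + inv;              // one add per iteration
      }
    }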
@ -515,12 +515,12 @@ void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree* loop, Node_List& old_ne
Node* test = prev->in(0);
while (test != loop->_head) { // Scan till run off top of loop
int p_op = prev->Opcode();
assert(test != NULL, "test cannot be NULL");
Node* test_cond = NULL;
assert(test != nullptr, "test cannot be null");
Node* test_cond = nullptr;
if ((p_op == Op_IfFalse || p_op == Op_IfTrue) && test->is_If()) {
test_cond = test->in(1);
}
if (test_cond != NULL && // Test?
if (test_cond != nullptr && // Test?
!test_cond->is_Con() && // And not already obvious?
// And condition is not a member of this loop?
!loop->is_member(get_loop(get_ctrl(test_cond)))) {
@ -950,14 +950,14 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
Node *init_n = cl->init_trip();
Node *limit_n = cl->limit();
if (limit_n == NULL) return false; // We will dereference it below.
if (limit_n == nullptr) return false; // We will dereference it below.
// Non-constant bounds.
// Protect against over-unrolling when init or/and limit are not constant
// (so that trip_count's init value is maxint) but iv range is known.
if (init_n == NULL || !init_n->is_Con() || !limit_n->is_Con()) {
if (init_n == nullptr || !init_n->is_Con() || !limit_n->is_Con()) {
Node* phi = cl->phi();
if (phi != NULL) {
if (phi != nullptr) {
assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
int next_stride = stride_con * 2; // stride after this unroll
@ -1172,8 +1172,8 @@ bool IdealLoopTree::policy_range_check(PhaseIdealLoop* phase, bool provisional,
// Try to pattern match with either cmp inputs, do not check
// whether one of the inputs is loop independent as it may not
// have had a chance to be hoisted yet.
if (!phase->is_scaled_iv_plus_offset(cmp->in(1), trip_counter, bt, NULL, NULL) &&
!phase->is_scaled_iv_plus_offset(cmp->in(2), trip_counter, bt, NULL, NULL)) {
if (!phase->is_scaled_iv_plus_offset(cmp->in(1), trip_counter, bt, nullptr, nullptr) &&
!phase->is_scaled_iv_plus_offset(cmp->in(2), trip_counter, bt, nullptr, nullptr)) {
continue;
}
} else {
@ -1193,7 +1193,7 @@ bool IdealLoopTree::policy_range_check(PhaseIdealLoop* phase, bool provisional,
}
}
if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, bt, NULL, NULL)) {
if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, bt, nullptr, nullptr)) {
continue;
}
}
@ -1244,13 +1244,13 @@ Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctr
// Only visit once
if (visited.test_set(n->_idx)) {
Node *x = clones.find(n->_idx);
return (x != NULL) ? x : n;
return (x != nullptr) ? x : n;
}
Node *x = NULL; // If required, a clone of 'n'
Node *x = nullptr; // If required, a clone of 'n'
// Check for 'n' being pinned in the backedge.
if (n->in(0) && n->in(0) == back_ctrl) {
assert(clones.find(n->_idx) == NULL, "dead loop");
assert(clones.find(n->_idx) == nullptr, "dead loop");
x = n->clone(); // Clone a copy of 'n' to preheader
clones.push(x, n->_idx);
x->set_req(0, preheader_ctrl); // Fix x's control input to preheader
@ -1263,7 +1263,7 @@ Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctr
Node *g = clone_up_backedge_goo(back_ctrl, preheader_ctrl, n->in(i), visited, clones);
if (g != n->in(i)) {
if (!x) {
assert(clones.find(n->_idx) == NULL, "dead loop");
assert(clones.find(n->_idx) == nullptr, "dead loop");
x = n->clone();
clones.push(x, n->_idx);
}
@ -1290,19 +1290,19 @@ Node* PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop)
return castii;
}
}
return NULL;
return nullptr;
}
#ifdef ASSERT
void PhaseIdealLoop::ensure_zero_trip_guard_proj(Node* node, bool is_main_loop) {
assert(node->is_IfProj(), "must be the zero trip guard If node");
Node* zer_bol = node->in(0)->in(1);
assert(zer_bol != NULL && zer_bol->is_Bool(), "must be Bool");
assert(zer_bol != nullptr && zer_bol->is_Bool(), "must be Bool");
Node* zer_cmp = zer_bol->in(1);
assert(zer_cmp != NULL && zer_cmp->Opcode() == Op_CmpI, "must be CmpI");
assert(zer_cmp != nullptr && zer_cmp->Opcode() == Op_CmpI, "must be CmpI");
// For the main loop, the opaque node is the second input to zer_cmp; for the post loop, it's the first input node
Node* zer_opaq = zer_cmp->in(is_main_loop ? 2 : 1);
assert(zer_opaq != NULL && zer_opaq->Opcode() == Op_OpaqueZeroTripGuard, "must be OpaqueZeroTripGuard");
assert(zer_opaq != nullptr && zer_opaq->Opcode() == Op_OpaqueZeroTripGuard, "must be OpaqueZeroTripGuard");
}
#endif
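
For reference, the guard shape these checks encode, sketched informally (operand placement only; the elided inputs are not spelled out here):

    main loop zero-trip guard:  CmpI(_, OpaqueZeroTripGuard(_))   // opaque is input 2
    post loop zero-trip guard:  CmpI(OpaqueZeroTripGuard(_), _)   // opaque is input 1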
@ -1319,7 +1319,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
uint dd_main_head, const uint idx_before_pre_post,
const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main,
Node* zero_trip_guard_proj_post, const Node_List &old_new) {
if (predicate != NULL) {
if (predicate != nullptr) {
#ifdef ASSERT
ensure_zero_trip_guard_proj(zero_trip_guard_proj_main, true);
ensure_zero_trip_guard_proj(zero_trip_guard_proj_post, false);
@ -1337,7 +1337,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
Node* opaque_stride = new OpaqueLoopStrideNode(C, stride);
register_new_node(opaque_stride, outer_main_head->in(LoopNode::EntryControl));
while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) {
while (predicate != nullptr && predicate->is_Proj() && predicate->in(0)->is_If()) {
iff = predicate->in(0)->as_If();
uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
if (uncommon_proj->unique_ctrl_out() != rgn)
@ -1347,7 +1347,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
// Clone the skeleton predicate twice and initialize one with the initial
// value of the loop induction variable. Leave the other predicate
// to be initialized when increasing the stride during loop unrolling.
prev_proj = clone_skeleton_predicate_and_initialize(iff, opaque_init, NULL, predicate, uncommon_proj,
prev_proj = clone_skeleton_predicate_and_initialize(iff, opaque_init, nullptr, predicate, uncommon_proj,
current_proj, outer_loop, prev_proj);
assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "");
@ -1363,7 +1363,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
// Change the control if 'loop_node' is part of the main loop. If there is an old->new mapping and the index of
// 'pre_loop_node' is greater than idx_before_pre_post, then we know that 'loop_node' was cloned and is part of
// the main loop (and 'pre_loop_node' is part of the pre loop).
if (!loop_node->is_CFG() && (pre_loop_node != NULL && pre_loop_node->_idx > idx_after_post_before_pre)) {
if (!loop_node->is_CFG() && (pre_loop_node != nullptr && pre_loop_node->_idx > idx_after_post_before_pre)) {
// 'loop_node' is a data node and part of the main loop. Rewire the control to the projection of the zero-trip guard if node
// of the main loop that is immediately preceding the cloned predicates.
_igvn.replace_input_of(loop_node, 0, zero_trip_guard_proj_main);
@ -1371,7 +1371,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
} else if (loop_node->_idx > idx_before_pre_post && loop_node->_idx < idx_after_post_before_pre) {
// 'loop_node' is a data node and part of the post loop. Rewire the control to the projection of the zero-trip guard if node
// of the post loop that is immediately preceding the post loop header node (there are no cloned predicates for the post loop).
assert(pre_loop_node == NULL, "a node belonging to the post loop should not have an old_new mapping at this stage");
assert(pre_loop_node == nullptr, "a node belonging to the post loop should not have an old_new mapping at this stage");
_igvn.replace_input_of(loop_node, 0, zero_trip_guard_proj_post);
--i;
}
@ -1442,7 +1442,7 @@ bool PhaseIdealLoop::skeleton_predicate_has_opaque(IfNode* iff) {
} else {
for (uint j = 1; j < n->req(); j++) {
Node* m = n->in(j);
if (m != NULL) {
if (m != nullptr) {
wq.push(m);
}
}
@ -1466,7 +1466,7 @@ void PhaseIdealLoop::count_opaque_loop_nodes(Node* n, uint& init, uint& stride)
if (skeleton_follow_inputs(n)) {
for (uint j = 1; j < n->req(); j++) {
Node* m = n->in(j);
if (m != NULL) {
if (m != nullptr) {
wq.push(m);
}
}
@ -1482,14 +1482,14 @@ void PhaseIdealLoop::count_opaque_loop_nodes(Node* n, uint& init, uint& stride)
// Clone the skeleton predicate bool for a main or unswitched loop:
// Main loop: Set new_init and new_stride nodes as new inputs.
// Unswitched loop: new_init and new_stride are both NULL. Clone OpaqueLoopInit and OpaqueLoopStride instead.
// Unswitched loop: new_init and new_stride are both null. Clone OpaqueLoopInit and OpaqueLoopStride instead.
Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, Node* new_stride, Node* control) {
Node_Stack to_clone(2);
to_clone.push(iff->in(1), 1);
uint current = C->unique();
Node* result = NULL;
bool is_unswitched_loop = new_init == NULL && new_stride == NULL;
assert(new_init != NULL || is_unswitched_loop, "new_init must be set when new_stride is non-null");
Node* result = nullptr;
bool is_unswitched_loop = new_init == nullptr && new_stride == nullptr;
assert(new_init != nullptr || is_unswitched_loop, "new_init must be set when new_stride is non-null");
// Look for the opaque node to replace with the new value
// and clone everything in between. We keep the Opaque4 node
// so the duplicated predicates are eliminated once loop
@ -1510,18 +1510,18 @@ Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, N
}
int op = m->Opcode();
if (op == Op_OpaqueLoopInit) {
if (is_unswitched_loop && m->_idx < current && new_init == NULL) {
if (is_unswitched_loop && m->_idx < current && new_init == nullptr) {
new_init = m->clone();
register_new_node(new_init, control);
}
n->set_req(i, new_init);
} else {
assert(op == Op_OpaqueLoopStride, "unexpected opaque node");
if (is_unswitched_loop && m->_idx < current && new_stride == NULL) {
if (is_unswitched_loop && m->_idx < current && new_stride == nullptr) {
new_stride = m->clone();
register_new_node(new_stride, control);
}
if (new_stride != NULL) {
if (new_stride != nullptr) {
n->set_req(i, new_stride);
}
}
@ -1551,9 +1551,9 @@ Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, N
next->set_req(j, cur);
}
}
} while (result == NULL);
} while (result == nullptr);
assert(result->_idx >= current, "new node expected");
assert(!is_unswitched_loop || new_init != NULL, "new_init must always be found and cloned");
assert(!is_unswitched_loop || new_init != nullptr, "new_init must always be found and cloned");
return result;
}
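
The clone-and-substitute walk above, reduced to a toy recursive version over a plain expression tree (the types and names below are this sketch's own, not C2's): nodes on the path down to an opaque leaf are cloned, the leaf is replaced by the new value, and untouched subtrees stay shared.

    #include <memory>

    struct Expr {
      enum Kind { Add, Leaf, OpaqueInit };
      Kind kind = Leaf;
      int value = 0;                  // payload for Leaf
      std::shared_ptr<Expr> l, r;     // children for Add
    };

    // Clone only the spine that reaches the OpaqueInit leaf; share the rest.
    std::shared_ptr<Expr> clone_subst(const std::shared_ptr<Expr>& e,
                                      const std::shared_ptr<Expr>& new_init) {
      if (e->kind == Expr::OpaqueInit) return new_init;  // substitution point
      if (e->kind == Expr::Leaf) return e;               // shared, not cloned
      auto l = clone_subst(e->l, new_init);
      auto r = clone_subst(e->r, new_init);
      if (l == e->l && r == e->r) return e;              // untouched subtree
      auto c = std::make_shared<Expr>();                 // clone on the spine
      c->kind = Expr::Add;
      c->l = l;
      c->r = r;
      return c;
    }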
@ -1589,15 +1589,15 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_
Node* zero_trip_guard_proj_post, const Node_List &old_new) {
if (UseLoopPredicate) {
Node* entry = pre_head->in(LoopNode::EntryControl);
Node* predicate = NULL;
Node* predicate = nullptr;
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL) {
if (predicate != nullptr) {
entry = skip_loop_predicates(entry);
}
Node* profile_predicate = NULL;
Node* profile_predicate = nullptr;
if (UseProfiledLoopPredicate) {
profile_predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
if (profile_predicate != NULL) {
if (profile_predicate != nullptr) {
entry = skip_loop_predicates(entry);
}
}
@ -1658,7 +1658,7 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n
// Add the post loop
const uint idx_before_pre_post = Compile::current()->unique();
CountedLoopNode *post_head = NULL;
CountedLoopNode *post_head = nullptr;
Node* post_incr = incr;
Node* main_exit = insert_post_loop(loop, old_new, main_head, main_end, post_incr, limit, post_head);
const uint idx_after_post_before_pre = Compile::current()->unique();
@ -1764,7 +1764,7 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n
// CastII for the main loop:
Node* castii = cast_incr_before_loop(pre_incr, min_taken, main_head);
assert(castii != NULL, "no castII inserted");
assert(castii != nullptr, "no castII inserted");
assert(post_head->in(1)->is_IfProj(), "must be zero-trip guard If node projection of the post loop");
copy_skeleton_predicates_to_main_loop(pre_head, castii, stride, outer_loop, outer_main_head, dd_main_head,
idx_before_pre_post, idx_after_post_before_pre, min_taken, post_head->in(1), old_new);
@ -1889,7 +1889,7 @@ void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old
Node *limit = main_end->limit();
// In this case we throw away the result as we are not using it to connect anything else.
CountedLoopNode *post_head = NULL;
CountedLoopNode *post_head = nullptr;
insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
copy_skeleton_predicates_to_post_loop(main_head->skip_strip_mined(), post_head, incr, main_head->stride());
@ -1936,7 +1936,7 @@ void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List
Node *limit = main_end->limit();
// In this case we throw away the result as we are not using it to connect anything else.
CountedLoopNode *post_head = NULL;
CountedLoopNode *post_head = nullptr;
insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
copy_skeleton_predicates_to_post_loop(main_head->skip_strip_mined(), post_head, incr, main_head->stride());
@ -2042,7 +2042,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree* loop, Node_List& old_new,
// CastII for the new post loop:
incr = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
assert(incr != NULL, "no castII inserted");
assert(incr != nullptr, "no castII inserted");
return new_main_exit;
}
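
Taken together, the construction leaves the iteration space split in three; for a count-up loop over i = 0..99 with stride 1 and an unroll factor of 4 (numbers illustrative), the pre-loop peels leading iterations until the main loop's entry conditions hold, the main loop covers the bulk in stride-4 chunks behind its zero-trip guard, and the post loop finishes the remaining 0..3 iterations behind its own zero-trip guard.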
@ -2074,7 +2074,7 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo
Node* max_value = _igvn.intcon(new_stride_con);
set_ctrl(max_value, C->root());
while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
while (entry != nullptr && entry->is_Proj() && entry->in(0)->is_If()) {
IfNode* iff = entry->in(0)->as_If();
ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con);
if (proj->unique_ctrl_out()->Opcode() != Op_Halt) {
@ -2112,7 +2112,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_post_loop(LoopNode* main_loop_h
Node* ctrl = main_loop_entry;
Node* prev_proj = post_loop_entry;
while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
while (ctrl != nullptr && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
IfNode* iff = ctrl->in(0)->as_If();
ProjNode* proj = iff->proj_out(1 - ctrl->as_Proj()->_con);
if (proj->unique_ctrl_out()->Opcode() != Op_Halt) {
@ -2215,7 +2215,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
Node *init = loop_head->init_trip();
Node *stride = loop_head->stride();
Node *opaq = NULL;
Node *opaq = nullptr;
if (adjust_min_trip) { // If not maximally unrolling, need adjustment
// Search for zero-trip guard.
@ -2223,7 +2223,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
// graph shape is encountered, the compiler bails out loop unrolling;
// compilation of the method will still succeed.
opaq = loop_head->is_canonical_loop_entry();
if (opaq == NULL) {
if (opaq == nullptr) {
return;
}
// Zero-trip test uses an 'opaque' node which is not shared.
@ -2232,7 +2232,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
C->set_major_progress();
Node* new_limit = NULL;
Node* new_limit = nullptr;
int stride_con = stride->get_int();
int stride_p = (stride_con > 0) ? stride_con : -stride_con;
uint old_trip_count = loop_head->trip_count();
@ -2274,7 +2274,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
// adjustment underflows or overflows, then the main loop is skipped.
Node* cmp = loop_end->cmp_node();
assert(cmp->in(2) == limit, "sanity");
assert(opaq != NULL && opaq->in(1) == limit, "sanity");
assert(opaq != nullptr && opaq->in(1) == limit, "sanity");
// Verify that policy_unroll result is still valid.
const TypeInt* limit_type = _igvn.type(limit)->is_int();
@ -2308,9 +2308,9 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
Node* underflow_clamp = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
set_ctrl(underflow_clamp, C->root());
Node* limit_before_underflow = NULL;
Node* prev_limit = NULL;
Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
Node* limit_before_underflow = nullptr;
Node* prev_limit = nullptr;
Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : nullptr;
if (loop_head->unrolled_count() > 1 &&
limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
limit->in(CMoveNode::IfTrue) == underflow_clamp &&
@ -2340,7 +2340,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
// | | |
// CMoveINode ([min_jint..hi] / [lo..max_jint])
//
assert(limit_before_underflow != NULL && prev_limit != NULL, "must find them");
assert(limit_before_underflow != nullptr && prev_limit != nullptr, "must find them");
Node* new_limit_with_underflow = new SubINode(prev_limit, stride);
register_new_node(new_limit_with_underflow, ctrl);
// We must compare with limit_before_underflow, prev_limit may already have underflowed.
@ -2359,7 +2359,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj
register_new_node(new_limit, ctrl);
}
assert(new_limit != NULL, "");
assert(new_limit != nullptr, "");
// Replace in loop test.
assert(loop_end->in(1)->in(1) == cmp, "sanity");
if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
@ -2513,9 +2513,9 @@ void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
// For definitions which are loop inclusive and not tripcounts.
Node* def_node = phi->in(LoopNode::LoopBackControl);
if (def_node != NULL) {
if (def_node != nullptr) {
Node* n_ctrl = get_ctrl(def_node);
if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
if (n_ctrl != nullptr && loop->is_member(get_loop(n_ctrl))) {
// Now test it to see if it fits the standard pattern for a reduction operator.
int opc = def_node->Opcode();
if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())
@ -2566,7 +2566,7 @@ void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round) {
Node* sub = new SubLNode(rc_limit, offset);
register_new_node(sub, pre_ctrl);
Node* limit = new DivLNode(NULL, sub, scale);
Node* limit = new DivLNode(nullptr, sub, scale);
register_new_node(limit, pre_ctrl);
// When the absolute value of scale is greater than one, the division
@ -2622,8 +2622,8 @@ Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* o
// holds true in the main-loop. Stride, scale, offset and limit are all loop
// invariant. Further, stride and scale are constants (offset and limit often are).
void PhaseIdealLoop::add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit) {
assert(_igvn.type(offset)->isa_long() != NULL && _igvn.type(low_limit)->isa_long() != NULL &&
_igvn.type(upper_limit)->isa_long() != NULL, "arguments should be long values");
assert(_igvn.type(offset)->isa_long() != nullptr && _igvn.type(low_limit)->isa_long() != nullptr &&
_igvn.type(upper_limit)->isa_long() != nullptr, "arguments should be long values");
// For a positive stride, we need to reduce the main-loop limit and
// increase the pre-loop limit. This is reversed for a negative stride.
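
A minimal sketch of the limit arithmetic involved, in plain C++ with hypothetical names: solve scale*i + offset < upper for i in 64-bit arithmetic, which is why the asserts above insist that offset and both limits are long values (the subtraction must not wrap in int range).

    #include <cstdint>

    // For scale > 0: an exclusive bound on i keeping scale*i + offset below upper.
    // Truncating division can tighten the bound by one iteration; the real code
    // rounds explicitly toward the safe side when |scale| > 1.
    int64_t exclusive_iv_bound(int64_t scale, int64_t offset, int64_t upper) {
      return (upper - offset) / scale;
    }

    // Example: scale = 4, offset = 8, upper = 100 gives i < (100 - 8) / 4 = 23,
    // so i = 22 is the last safe value (4*22 + 8 = 96 < 100).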
@ -2728,10 +2728,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
exp = exp->uncast(); //strip casts
assert(exp_bt == T_INT || exp_bt == T_LONG, "unexpected int type");
if (is_iv(exp, iv, exp_bt)) {
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = 1;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
*p_short_scale = false;
}
return true;
@ -2751,10 +2751,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
if (scale == 0) {
return false; // might be top
}
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
// (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough...
*p_short_scale = (exp_bt != bt && scale != 1);
}
@ -2772,10 +2772,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
} else if (exp_bt == T_LONG) {
scale = java_shift_left((jlong)1, (julong)shift_amount);
}
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
// (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough...
*p_short_scale = (exp_bt != bt && scale != 1);
}
@ -2797,10 +2797,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
// overflow but that's fine because result wraps.
return false;
}
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale_sum;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
*p_short_scale = short_scale_l && short_scale_r;
}
return true;
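
As a concrete instance of this case (values illustrative): an expression built as (iv*3) + (iv*5) over the induction variable matches with scale_sum = 8, since iv*3 + iv*5 folds to iv*8 under wraparound arithmetic.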
@ -2815,10 +2815,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
return false;
}
scale = java_multiply(scale, (jlong)-1);
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
// (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough...
*p_short_scale = *p_short_scale || (exp_bt != bt && scale != 1);
}
@ -2840,10 +2840,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc
// result may also overflow but that's fine because result wraps.
return false;
}
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale_diff;
}
if (p_short_scale != NULL) {
if (p_short_scale != nullptr) {
*p_short_scale = short_scale_l && short_scale_r;
}
return true;
@ -2876,10 +2876,10 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt,
BasicType exp_bt = bt;
exp = exp->uncast();
if (is_scaled_iv(exp, iv, exp_bt, &scale, p_short_scale)) {
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_offset != NULL) {
if (p_offset != nullptr) {
Node *zero = _igvn.zerocon(bt);
set_ctrl(zero, C->root());
*p_offset = zero;
@ -2895,16 +2895,16 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt,
}
int opc = exp->Opcode();
int which = 0; // this is which subexpression we find the iv in
Node* offset = NULL;
Node* offset = nullptr;
if (opc == Op_Add(exp_bt)) {
// Check for a scaled IV in (AddX (MulX iv S) E) or (AddX E (MulX iv S)).
if (is_scaled_iv(exp->in(which = 1), iv, bt, &scale, p_short_scale) ||
is_scaled_iv(exp->in(which = 2), iv, bt, &scale, p_short_scale)) {
offset = exp->in(which == 1 ? 2 : 1); // the other argument
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_offset != NULL) {
if (p_offset != nullptr) {
*p_offset = offset;
}
return true;
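
For example (illustrative values): an address expression shaped as AddI(LShiftI(iv, 2), 12) matches here with scale = 4 recovered from the shift by two, and offset = 12 taken from the other input of the add.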
@ -2927,10 +2927,10 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt,
}
scale = java_multiply(scale, (jlong)-1);
}
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = scale;
}
if (p_offset != NULL) {
if (p_offset != nullptr) {
if (which == 1) { // must negate the extracted offset
Node *zero = _igvn.integercon(0, exp_bt);
set_ctrl(zero, C->root());
@ -2957,12 +2957,12 @@ bool PhaseIdealLoop::is_scaled_iv_plus_extra_offset(Node* exp1, Node* offset3, N
// By the time we reach here, it is unlikely that exp1 is a simple iv*K.
// If it is a linear iv transform, it is probably an add or subtract.
// Let's collect the internal offset2 from it.
Node* offset2 = NULL;
Node* offset2 = nullptr;
if (offset3->is_Con() &&
depth < 2 &&
is_scaled_iv_plus_offset(exp1, iv, bt, p_scale,
&offset2, p_short_scale, depth+1)) {
if (p_offset != NULL) {
if (p_offset != nullptr) {
Node* ctrl_off2 = get_ctrl(offset2);
Node* offset = AddNode::make(offset2, offset3, bt);
register_new_node(offset, ctrl_off2);
@ -2979,10 +2979,10 @@ Node* PhaseIdealLoop::add_range_check_predicate(IdealLoopTree* loop, CountedLoop
Node* predicate_proj, int scale_con, Node* offset,
Node* limit, jint stride_con, Node* value) {
bool overflow = false;
BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, value, NULL, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow, false);
BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, value, nullptr, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow, false);
Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
register_new_node(opaque_bol, predicate_proj);
IfNode* new_iff = NULL;
IfNode* new_iff = nullptr;
if (overflow) {
new_iff = new IfNode(predicate_proj, opaque_bol, PROB_MAX, COUNT_UNKNOWN);
} else {
@ -3030,7 +3030,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
// Check graph shape. Cannot optimize a loop if zero-trip
// Opaque1 node is optimized away and then another round
// of loop opts attempted.
if (cl->is_canonical_loop_entry() == NULL) {
if (cl->is_canonical_loop_entry() == nullptr) {
return;
}
@ -3065,7 +3065,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
// Ensure the original loop limit is available from the
// pre-loop Opaque1 node.
Node *orig_limit = pre_opaq->original_loop_limit();
if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) {
if (orig_limit == nullptr || _igvn.type(orig_limit) == Type::TOP) {
return;
}
// Must know if it's a count-up or count-down loop
@ -3131,7 +3131,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
}
// Check for scaled induction variable plus an offset
Node *offset = NULL;
Node *offset = nullptr;
if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
continue;
@ -3252,7 +3252,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop
// Allow the load to float around in the loop, or before it
// but NOT before the pre-loop.
_igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
_igvn.replace_input_of(cd, 0, ctrl); // ctrl, not null
--i;
--imax;
}
@ -3341,7 +3341,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
}
// Find RCE'd post loop so that we can stage its guard.
if (legacy_cl->is_canonical_loop_entry() == NULL) {
if (legacy_cl->is_canonical_loop_entry() == nullptr) {
return multi_version_succeeded;
}
Node* ctrl = legacy_cl->in(LoopNode::EntryControl);
@ -3349,19 +3349,19 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
// Now we test that both the post loops are connected
Node* post_loop_region = iffm->in(0);
if (post_loop_region == NULL) return multi_version_succeeded;
if (post_loop_region == nullptr) return multi_version_succeeded;
if (!post_loop_region->is_Region()) return multi_version_succeeded;
Node* covering_region = post_loop_region->in(RegionNode::Control+1);
if (covering_region == NULL) return multi_version_succeeded;
if (covering_region == nullptr) return multi_version_succeeded;
if (!covering_region->is_Region()) return multi_version_succeeded;
Node* p_f = covering_region->in(RegionNode::Control);
if (p_f == NULL) return multi_version_succeeded;
if (p_f == nullptr) return multi_version_succeeded;
if (!p_f->is_IfFalse()) return multi_version_succeeded;
if (!p_f->in(0)->is_CountedLoopEnd()) return multi_version_succeeded;
CountedLoopEndNode* rce_loop_end = p_f->in(0)->as_CountedLoopEnd();
if (rce_loop_end == NULL) return multi_version_succeeded;
if (rce_loop_end == nullptr) return multi_version_succeeded;
CountedLoopNode* rce_cl = rce_loop_end->loopnode();
if (rce_cl == NULL || !rce_cl->is_post_loop()) return multi_version_succeeded;
if (rce_cl == nullptr || !rce_cl->is_post_loop()) return multi_version_succeeded;
CountedLoopNode *known_rce_cl = rce_loop->_head->as_CountedLoop();
if (rce_cl != known_rce_cl) return multi_version_succeeded;
@ -3385,7 +3385,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
// we have a work list. Now we will try to transform the if guard to cause
// the loop pair to be multi version executed with the determination left to runtime
// or the optimizer if full information is known about the given arrays at compile time.
Node *last_min = NULL;
Node *last_min = nullptr;
multi_version_succeeded = true;
while (worklist.size()) {
Node* rc_iffm = worklist.pop();
@ -3556,7 +3556,7 @@ void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *
}
// Can we find the main loop?
if (_next == NULL) {
if (_next == nullptr) {
return;
}
@ -3616,11 +3616,11 @@ bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) {
#ifdef ASSERT
// Ensure at most one used phi exists, which is the iv.
Node* iv = NULL;
Node* iv = nullptr;
for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
Node* n = cl->fast_out(i);
if ((n->Opcode() == Op_Phi) && (n->outcnt() > 0)) {
assert(iv == NULL, "Too many phis");
assert(iv == nullptr, "Too many phis");
iv = n;
}
}
@ -3830,7 +3830,7 @@ void IdealLoopTree::collect_loop_core_nodes(PhaseIdealLoop* phase, Unique_Node_L
Node* n = wq.at(i);
for (uint j = 0; j < n->req(); ++j) {
Node* in = n->in(j);
if (in != NULL) {
if (in != nullptr) {
if (phase->get_loop(phase->ctrl_or_self(in)) == this) {
wq.push(in);
}
@ -4076,21 +4076,21 @@ bool PhaseIdealLoop::do_intrinsify_fill() {
// value in a unit stride loop,
bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
Node*& shift, Node*& con) {
const char* msg = NULL;
Node* msg_node = NULL;
const char* msg = nullptr;
Node* msg_node = nullptr;
store_value = NULL;
con = NULL;
shift = NULL;
store_value = nullptr;
con = nullptr;
shift = nullptr;
// Process the loop looking for stores. If there are multiple
// stores or extra control flow, give up at this point.
CountedLoopNode* head = lpt->_head->as_CountedLoop();
for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) {
Node* n = lpt->_body.at(i);
if (n->outcnt() == 0) continue; // Ignore dead
if (n->is_Store()) {
if (store != NULL) {
if (store != nullptr) {
msg = "multiple stores";
break;
}
@ -4113,12 +4113,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
}
if (store == NULL) {
if (store == nullptr) {
// No store in loop
return false;
}
if (msg == NULL && head->stride_con() != 1) {
if (msg == nullptr && head->stride_con() != 1) {
// could handle negative strides too
if (head->stride_con() < 0) {
msg = "negative stride";
@ -4127,12 +4127,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
}
if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
if (msg == nullptr && !store->in(MemNode::Address)->is_AddP()) {
msg = "can't handle store address";
msg_node = store->in(MemNode::Address);
}
if (msg == NULL &&
if (msg == nullptr &&
(!store->in(MemNode::Memory)->is_Phi() ||
store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
msg = "store memory isn't proper phi";
@ -4142,17 +4142,17 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
// Make sure there is an appropriate fill routine
BasicType t = store->as_Mem()->memory_type();
const char* fill_name;
if (msg == NULL &&
StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
if (msg == nullptr &&
StubRoutines::select_fill_function(t, false, fill_name) == nullptr) {
msg = "unsupported store";
msg_node = store;
}
if (msg != NULL) {
if (msg != nullptr) {
#ifndef PRODUCT
if (TraceOptimizeFill) {
tty->print_cr("not fill intrinsic candidate: %s", msg);
if (msg_node != NULL) msg_node->dump();
if (msg_node != nullptr) msg_node->dump();
}
#endif
return false;
@ -4161,15 +4161,15 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
// Make sure the address expression can be handled. It should be
// head->phi * elsize + con. head->phi might have a ConvI2L(CastII()).
Node* elements[4];
Node* cast = NULL;
Node* conv = NULL;
Node* cast = nullptr;
Node* conv = nullptr;
bool found_index = false;
int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
for (int e = 0; e < count; e++) {
Node* n = elements[e];
if (n->is_Con() && con == NULL) {
if (n->is_Con() && con == nullptr) {
con = n;
} else if (n->Opcode() == Op_LShiftX && shift == NULL) {
} else if (n->Opcode() == Op_LShiftX && shift == nullptr) {
Node* value = n->in(1);
#ifdef _LP64
if (value->Opcode() == Op_ConvI2L) {
@ -4193,7 +4193,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
shift = n;
}
}
} else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
} else if (n->Opcode() == Op_ConvI2L && conv == nullptr) {
conv = n;
n = n->in(1);
if (n->Opcode() == Op_CastII &&
@ -4226,16 +4226,16 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
// byte sized items won't have a shift
if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
if (msg == nullptr && shift == nullptr && t != T_BYTE && t != T_BOOLEAN) {
msg = "can't find shift";
msg_node = store;
}
if (msg != NULL) {
if (msg != nullptr) {
#ifndef PRODUCT
if (TraceOptimizeFill) {
tty->print_cr("not fill intrinsic: %s", msg);
if (msg_node != NULL) msg_node->dump();
if (msg_node != nullptr) msg_node->dump();
}
#endif
return false;
@ -4264,7 +4264,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
if (cast) ok.set(cast->_idx);
if (conv) ok.set(conv->_idx);
for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) {
Node* n = lpt->_body.at(i);
if (n->outcnt() == 0) continue; // Ignore dead
if (ok.test(n->_idx)) continue;
@ -4278,7 +4278,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
// Make sure no unexpected values are used outside the loop
for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) {
Node* n = lpt->_body.at(i);
// These values can be replaced with other nodes if they are used
// outside the loop.
@ -4295,9 +4295,9 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
#ifdef ASSERT
if (TraceOptimizeFill) {
if (msg != NULL) {
if (msg != nullptr) {
tty->print_cr("no fill intrinsic: %s", msg);
if (msg_node != NULL) msg_node->dump();
if (msg_node != nullptr) msg_node->dump();
} else {
tty->print_cr("fill intrinsic for:");
}
@ -4308,7 +4308,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
#endif
return msg == NULL;
return msg == nullptr;
}
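
The loop shape these checks accept, as a plain C++ sketch (hypothetical names): a counted loop whose only memory effect is a unit-stride store of a loop-invariant value; anything else in the body (a second store, extra control flow, a value escaping the loop) sets msg and rejects the candidate.

    #include <cstddef>

    void fill_like(int* a, size_t n, int v) {
      for (size_t i = 0; i < n; i++) {
        a[i] = v;    // address = base + con + (i << shift); value v is invariant
      }
    }

    // A matching loop is replaced wholesale by a single call to the platform's
    // fill stub for the element type.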
@ -4329,16 +4329,16 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
// Check that the body only contains a store of a loop invariant
// value that is indexed by the loop phi.
Node* store = NULL;
Node* store_value = NULL;
Node* shift = NULL;
Node* offset = NULL;
Node* store = nullptr;
Node* store_value = nullptr;
Node* shift = nullptr;
Node* offset = nullptr;
if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
return false;
}
Node* exit = head->loopexit()->proj_out_or_null(0);
if (exit == NULL) {
if (exit == nullptr) {
return false;
}
@ -4359,7 +4359,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
index = new ConvI2LNode(index);
_igvn.register_new_node_with_optimizer(index);
#endif
if (shift != NULL) {
if (shift != nullptr) {
// byte arrays don't require a shift but others do.
index = new LShiftXNode(index, shift->in(2));
_igvn.register_new_node_with_optimizer(index);
@ -4368,10 +4368,10 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
_igvn.register_new_node_with_optimizer(from);
// For normal array fills, C2 uses two AddP nodes for array element
// addressing. But for array fills with Unsafe call, there's only one
// AddP node adding an absolute offset, so we do a NULL check here.
assert(offset != NULL || C->has_unsafe_access(),
// AddP node adding an absolute offset, so we do a null check here.
assert(offset != nullptr || C->has_unsafe_access(),
"Only array fills with unsafe have no extra offset");
if (offset != NULL) {
if (offset != nullptr) {
from = new AddPNode(base, from, offset);
_igvn.register_new_node_with_optimizer(from);
}
@ -4394,7 +4394,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
BasicType t = store->as_Mem()->memory_type();
bool aligned = false;
if (offset != NULL && head->init_trip()->is_Con()) {
if (offset != nullptr && head->init_trip()->is_Con()) {
int element_size = type2aelembytes(t);
aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
}
@ -4402,7 +4402,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
// Build a call to the fill routine
const char* fill_name;
address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
assert(fill != NULL, "what?");
assert(fill != nullptr, "what?");
// Convert float/double to int/long for fill routines
if (t == T_FLOAT) {
@ -4446,7 +4446,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
// If this fill is tightly coupled to an allocation and overwrites
// the whole body, allow it to take over the zeroing.
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
if (alloc != NULL && alloc->is_AllocateArray()) {
if (alloc != nullptr && alloc->is_AllocateArray()) {
Node* length = alloc->as_AllocateArray()->Ideal_length();
if (head->limit() == length &&
head->init_trip() == _igvn.intcon(0)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,7 @@ bool IdealLoopTree::policy_unswitching( PhaseIdealLoop *phase ) const {
if (head->unswitch_count() + 1 > head->unswitch_max()) {
return false;
}
if (phase->find_unswitching_candidate(this) == NULL) {
if (phase->find_unswitching_candidate(this) == nullptr) {
return false;
}
@ -88,7 +88,7 @@ IfNode* PhaseIdealLoop::find_unswitching_candidate(const IdealLoopTree *loop) co
// Find first invariant test that doesn't exit the loop
LoopNode *head = loop->_head->as_Loop();
IfNode* unswitch_iff = NULL;
IfNode* unswitch_iff = nullptr;
Node* n = head->in(LoopNode::LoopBackControl);
while (n != head) {
Node* n_dom = idom(n);
@ -120,9 +120,9 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
LoopNode *head = loop->_head->as_Loop();
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
if (find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check) != NULL
|| (UseProfiledLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate) != NULL)
|| (UseLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_predicate) != NULL)) {
if (find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check) != nullptr
|| (UseProfiledLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate) != nullptr)
|| (UseLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_predicate) != nullptr)) {
assert(entry->is_IfProj(), "sanity - must be ifProj since there is at least one predicate");
if (entry->outcnt() > 1) {
// Bailout if there are loop predicates from which there are additional control dependencies (i.e. from
@ -133,7 +133,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
}
// Find first invariant test that doesn't exit the loop
IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop);
assert(unswitch_iff != NULL, "should be at least one");
assert(unswitch_iff != nullptr, "should be at least one");
#ifndef PRODUCT
if (TraceLoopOpts) {
@ -155,7 +155,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
assert(proj_true->is_IfTrue(), "must be true projection");
entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
Node* predicate = find_predicate(entry);
if (predicate == NULL) {
if (predicate == nullptr) {
// No empty predicate
Node* uniqc = proj_true->unique_ctrl_out();
assert((uniqc == head && !head->is_strip_mined()) || (uniqc == head->in(LoopNode::EntryControl)
@ -166,13 +166,13 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
Node* proj_before_first_empty_predicate = skip_loop_predicates(entry);
if (UseProfiledLoopPredicate) {
predicate = find_predicate(proj_before_first_empty_predicate);
if (predicate != NULL) {
if (predicate != nullptr) {
proj_before_first_empty_predicate = skip_loop_predicates(predicate);
}
}
if (UseLoopPredicate) {
predicate = find_predicate(proj_before_first_empty_predicate);
if (predicate != NULL) {
if (predicate != nullptr) {
proj_before_first_empty_predicate = skip_loop_predicates(predicate);
}
}
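
The transformation being set up here, as a minimal C++ sketch (names illustrative): the invariant test is hoisted out and the loop is duplicated, so each copy runs with a branch-free body.

    void before(int* a, int n, bool flag) {
      for (int i = 0; i < n; i++) {
        if (flag) a[i] += 1; else a[i] -= 1;    // 'flag' is loop-invariant
      }
    }

    void after_unswitch(int* a, int n, bool flag) {
      if (flag) {
        for (int i = 0; i < n; i++) a[i] += 1;  // fast version
      } else {
        for (int i = 0; i < n; i++) a[i] -= 1;  // slow version
      }
    }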
@ -343,9 +343,9 @@ LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, Co
CountedLoopReserveKit::CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active = true) :
_phase(phase),
_lpt(loop),
_lp(NULL),
_iff(NULL),
_lp_reserved(NULL),
_lp(nullptr),
_iff(nullptr),
_lp_reserved(nullptr),
_has_reserved(false),
_use_new(false),
_active(active)

File diff suppressed because it is too large


@ -144,9 +144,9 @@ public:
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int Opcode() const;
bool can_be_counted_loop(PhaseTransform* phase) const {
return req() == 3 && in(0) != NULL &&
in(1) != NULL && phase->type(in(1)) != Type::TOP &&
in(2) != NULL && phase->type(in(2)) != Type::TOP;
return req() == 3 && in(0) != nullptr &&
in(1) != nullptr && phase->type(in(1)) != Type::TOP &&
in(2) != nullptr && phase->type(in(2)) != Type::TOP;
}
bool is_valid_counted_loop(BasicType bt) const;
#ifndef PRODUCT
@ -155,10 +155,10 @@ public:
void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN;
virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; }
virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; }
virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; }
virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; }
virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return nullptr; }
virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return nullptr; }
virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return nullptr; }
virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return nullptr; }
};
//------------------------------Counted Loops----------------------------------
@ -383,12 +383,12 @@ public:
init_class_id(Class_BaseCountedLoopEnd);
}
Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; }
Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }
Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : nullptr; }
Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
bool stride_is_con() const { Node *tmp = stride(); return (tmp != nullptr && tmp->is_Con()); }
PhiNode* phi() const {
Node* tmp = incr();
@ -398,7 +398,7 @@ public:
return phi->as_Phi();
}
}
return NULL;
return nullptr;
}
BaseCountedLoopNode* loopnode() const {
@ -406,15 +406,15 @@ public:
// have been optimized out by the IGVN so be cautious with the
// pattern matching on the graph
PhiNode* iv_phi = phi();
if (iv_phi == NULL) {
return NULL;
if (iv_phi == nullptr) {
return nullptr;
}
Node* ln = iv_phi->in(0);
if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) {
return NULL;
return nullptr;
}
if (ln->as_BaseCountedLoop()->bt() != bt()) {
return NULL;
return nullptr;
}
return ln->as_BaseCountedLoop();
}
@ -470,54 +470,54 @@ public:
inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const {
Node* bctrl = back_control();
if (bctrl == NULL) return NULL;
if (bctrl == nullptr) return nullptr;
Node* lexit = bctrl->in(0);
if (!lexit->is_BaseCountedLoopEnd()) {
return NULL;
return nullptr;
}
BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd();
if (result->bt() != bt()) {
return NULL;
return nullptr;
}
return result;
}
inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
assert(cle != NULL, "loopexit is NULL");
assert(cle != nullptr, "loopexit is null");
return cle;
}
inline Node* BaseCountedLoopNode::init_trip() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->init_trip() : NULL;
return cle != nullptr ? cle->init_trip() : nullptr;
}
inline Node* BaseCountedLoopNode::stride() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->stride() : NULL;
return cle != nullptr ? cle->stride() : nullptr;
}
inline bool BaseCountedLoopNode::stride_is_con() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL && cle->stride_is_con();
return cle != nullptr && cle->stride_is_con();
}
inline Node* BaseCountedLoopNode::limit() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->limit() : NULL;
return cle != nullptr ? cle->limit() : nullptr;
}
inline Node* BaseCountedLoopNode::incr() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->incr() : NULL;
return cle != nullptr ? cle->incr() : nullptr;
}
inline Node* BaseCountedLoopNode::phi() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->phi() : NULL;
return cle != nullptr ? cle->phi() : nullptr;
}
inline jlong BaseCountedLoopNode::stride_con() const {
BaseCountedLoopEndNode* cle = loopexit_or_null();
return cle != NULL ? cle->stride_con() : 0;
return cle != nullptr ? cle->stride_con() : 0;
}
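
A usage sketch for the accessors above (variable names illustrative): because each one funnels through loopexit_or_null and degrades to nullptr (or 0 for stride_con) when the loop shape is still incomplete, callers can probe a loop without validating the graph first.

    // given: BaseCountedLoopNode* l obtained during loop analysis
    Node* limit = l->limit();              // nullptr while the shape is incomplete
    if (limit != nullptr && l->stride_is_con()) {
      jlong stride_con = l->stride_con();  // safe: a loop exit must exist here
      // use limit and stride_con ...
    }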
@ -630,12 +630,12 @@ public:
_local_loop_unroll_limit(0), _local_loop_unroll_factor(0),
_nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
_has_range_checks(0), _has_range_checks_computed(0),
_safepts(NULL),
_required_safept(NULL),
_safepts(nullptr),
_required_safept(nullptr),
_allow_optimizations(true)
{
precond(_head != NULL);
precond(_tail != NULL);
precond(_head != nullptr);
precond(_tail != nullptr);
}
// Is 'l' a member of 'this'?
@ -761,7 +761,7 @@ public:
// are combined with an associative binary. Helper for reassociate_invariants.
int find_invariant(Node* n, PhaseIdealLoop *phase);
// Return TRUE if "n" is associative.
bool is_associative(Node* n, Node* base=NULL);
bool is_associative(Node* n, Node* base=nullptr);
// Return true if n is invariant
bool is_invariant(Node* n) const;
@ -769,11 +769,11 @@ public:
// Put loop body on igvn work list
void record_for_igvn();
bool is_root() { return _parent == NULL; }
bool is_root() { return _parent == nullptr; }
// A proper/reducible loop w/o any (occasional) dead back-edge.
bool is_loop() { return !_irreducible && !tail()->is_top(); }
bool is_counted() { return is_loop() && _head->is_CountedLoop(); }
bool is_innermost() { return is_loop() && _child == NULL; }
bool is_innermost() { return is_loop() && _child == nullptr; }
void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);
@ -913,8 +913,8 @@ private:
// 2) a use is the same as the current LCA passed as 'n1'
Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
assert( n->is_CFG(), "" );
// Fast-path NULL lca
if( lca != NULL && lca != n ) {
// Fast-path null lca
if( lca != nullptr && lca != n ) {
assert( lca->is_CFG(), "" );
// find LCA of all uses
n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
@ -925,7 +925,7 @@ private:
// Helper function for directing control inputs away from CFG split points.
Node *find_non_split_ctrl( Node *ctrl ) const {
if (ctrl != NULL) {
if (ctrl != nullptr) {
if (ctrl->is_MultiBranch()) {
ctrl = ctrl->in(0);
}
@ -969,8 +969,8 @@ public:
PhaseIterGVN &igvn() const { return _igvn; }
bool has_node( Node* n ) const {
guarantee(n != NULL, "No Node.");
return _nodes[n->_idx] != NULL;
guarantee(n != nullptr, "No Node.");
return _nodes[n->_idx] != nullptr;
}
// check if transform created new nodes that need _ctrl recorded
Node *get_late_ctrl( Node *n, Node *early );
@ -989,8 +989,8 @@ public:
IdealLoopTree* old_loop = get_loop(get_ctrl(n));
IdealLoopTree* new_loop = get_loop(ctrl);
if (old_loop != new_loop) {
if (old_loop->_child == NULL) old_loop->_body.yank(n);
if (new_loop->_child == NULL) new_loop->_body.push(n);
if (old_loop->_child == nullptr) old_loop->_body.yank(n);
if (new_loop->_child == nullptr) new_loop->_body.push(n);
}
set_ctrl(n, ctrl);
}
@ -1133,10 +1133,10 @@ public:
Node* idom_no_update(uint didx) const {
assert(didx < _idom_size, "oob");
Node* n = _idom[didx];
assert(n != NULL,"Bad immediate dominator info.");
while (n->in(0) == NULL) { // Skip dead CFG nodes
assert(n != nullptr,"Bad immediate dominator info.");
while (n->in(0) == nullptr) { // Skip dead CFG nodes
n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
assert(n != NULL,"Bad immediate dominator info.");
assert(n != nullptr,"Bad immediate dominator info.");
}
return n;
}
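idom_no_update() above resolves dead entries through a tagged pointer: bit 0 of the _nodes entry serves as a flag, and & ~1 strips it before the value is reused as a Node*. A self-contained illustration of that low-bit convention (made-up names, not HotSpot code):

#include <cassert>
#include <cstdint>

int main() {
  alignas(8) static int slot = 0;  // word alignment keeps bit 0 of the address clear
  void* p = &slot;
  assert((reinterpret_cast<uintptr_t>(p) & 1) == 0);
  // Tag the pointer: it still carries the address but must not be dereferenced.
  void* tagged = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) | 1);
  // Strip the tag to recover the original pointer, as the & ~1 above does.
  void* untagged = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(tagged) & ~uintptr_t(1));
  assert(untagged == p);
  return 0;
}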
@ -1152,7 +1152,7 @@ public:
}
uint dom_depth(Node* d) const {
guarantee(d != NULL, "Null dominator info.");
guarantee(d != nullptr, "Null dominator info.");
guarantee(d->_idx < _idom_size, "");
return _dom_depth[d->_idx];
}
@ -1202,7 +1202,7 @@ public:
bool _has_irreducible_loops;
// Per-Node transform
virtual Node* transform(Node* n) { return NULL; }
virtual Node* transform(Node* n) { return nullptr; }
Node* loop_exit_control(Node* x, IdealLoopTree* loop);
Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob);
@ -1249,7 +1249,7 @@ public:
// normal "loop-exit" condition. All uses of loop-invariant old-loop values
// now come from (one or more) Phis that merge their new-loop equivalents.
// Parameter side_by_side_idom:
// When side_by_size_idom is NULL, the dominator tree is constructed for
// When side_by_size_idom is null, the dominator tree is constructed for
// the clone loop to dominate the original. Used in construction of
// pre-main-post loop sequence.
// When nonnull, the clone and original are side-by-side, both are
@ -1264,7 +1264,7 @@ public:
// strip mined loop.
};
void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
CloneLoopMode mode, Node* side_by_side_idom = NULL);
CloneLoopMode mode, Node* side_by_side_idom = nullptr);
void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
IdealLoopTree* loop, IdealLoopTree* companion_loop,
Node_List*& split_if_set, Node_List*& split_bool_set,
@ -1319,12 +1319,12 @@ public:
bool is_iv(Node* exp, Node* iv, BasicType bt);
// Return true if exp is a scaled induction var plus (or minus) constant
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = NULL, int depth = 0);
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = nullptr, int depth = 0);
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset) {
jlong long_scale;
if (is_scaled_iv_plus_offset(exp, iv, T_INT, &long_scale, p_offset)) {
int int_scale = checked_cast<int>(long_scale);
if (p_scale != NULL) {
if (p_scale != nullptr) {
*p_scale = int_scale;
}
return true;
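A caller sketch for the int wrapper above (exp and iv are assumed to be nodes already in hand; the call is unqualified because it would run inside PhaseIdealLoop):

  int scale = 0;
  Node* offset = nullptr;
  if (is_scaled_iv_plus_offset(exp, iv, &scale, &offset)) {
    // success: exp == iv * scale + offset, with both out-parameters filled in
  }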
@ -1564,7 +1564,7 @@ public:
Node *split_thru_phi( Node *n, Node *region, int policy );
// Found an If getting its condition-code input from a Phi in the
// same block. Split thru the Region.
void do_split_if(Node *iff, RegionNode** new_false_region = NULL, RegionNode** new_true_region = NULL);
void do_split_if(Node *iff, RegionNode** new_false_region = nullptr, RegionNode** new_true_region = nullptr);
// Conversion of fill/copy patterns into intrinsic versions
bool do_intrinsify_fill();
@ -1575,7 +1575,7 @@ public:
private:
// Return a type based on condition control flow
const TypeInt* filtered_type( Node *n, Node* n_ctrl);
const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); }
// Helpers for filtered type
const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
@ -1778,7 +1778,7 @@ public:
_check_at_final(chk == BUDGET_CHECK),
_nodes_at_begin(0)
{
precond(_phase != NULL);
precond(_phase != nullptr);
_nodes_at_begin = _phase->require_nodes_begin();
}
@ -1877,7 +1877,7 @@ class CountedLoopReserveKit {
inline Node* IdealLoopTree::tail() {
// Handle lazy update of _tail field.
if (_tail->in(0) == NULL) {
if (_tail->in(0) == nullptr) {
_tail = _phase->get_ctrl(_tail);
}
return _tail;
@ -1885,7 +1885,7 @@ inline Node* IdealLoopTree::tail() {
inline Node* IdealLoopTree::head() {
// Handle lazy update of _head field.
if (_head->in(0) == NULL) {
if (_head->in(0) == nullptr) {
_head = _phase->get_ctrl(_head);
}
return _head;
@ -1907,7 +1907,7 @@ private:
public:
LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}
bool done() { return _curnt == NULL; } // Finished iterating?
bool done() { return _curnt == nullptr; } // Finished iterating?
void next(); // Advance to next loop tree
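A usage sketch for the iterator, assuming an IdealLoopTree* root in hand and a current() accessor exposing _curnt (both assumptions; only the constructor, done() and next() are declared above):

  for (LoopTreeIterator iter(root); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();  // assumed accessor for _curnt
    if (lpt->is_innermost()) {
      // innermost loops only
    }
  }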

File diff suppressed because it is too large

View File

@ -48,7 +48,7 @@ relocInfo::relocType MachOper::constant_reloc() const { return relocInfo::none;
jdouble MachOper::constantD() const { ShouldNotReachHere(); return 0.0; }
jfloat MachOper::constantF() const { ShouldNotReachHere(); return 0.0; }
jlong MachOper::constantL() const { ShouldNotReachHere(); return CONST64(0) ; }
TypeOopPtr *MachOper::oop() const { return NULL; }
TypeOopPtr *MachOper::oop() const { return nullptr; }
int MachOper::ccode() const { return 0x00; }
// A zero, default, indicates this value is not needed.
// May need to lookup the base register, as done in int_ and ext_format
@ -80,7 +80,7 @@ const Type *MachOper::type() const {
//------------------------------in_RegMask-------------------------------------
const RegMask *MachOper::in_RegMask(int index) const {
ShouldNotReachHere();
return NULL;
return nullptr;
}
//------------------------------dump_spec--------------------------------------
@ -186,7 +186,7 @@ bool MachNode::cmp( const Node &node ) const {
// Return an equivalent instruction using memory for cisc_operand position
MachNode *MachNode::cisc_version(int offset) {
ShouldNotCallThis();
return NULL;
return nullptr;
}
void MachNode::use_cisc_RegMask() {
@ -214,7 +214,7 @@ const RegMask &MachNode::in_RegMask( uint idx ) const {
}
const RegMask *rm = cisc_RegMask();
if( rm == NULL || (int)opcnt != cisc_operand() ) {
if( rm == nullptr || (int)opcnt != cisc_operand() ) {
rm = _opnds[opcnt]->in_RegMask(idx-skipped);
}
return *rm;
@ -228,9 +228,9 @@ const MachOper* MachNode::memory_inputs(Node* &base, Node* &index) const {
base = NodeSentinel;
index = NodeSentinel;
} else {
base = NULL;
index = NULL;
if (oper != NULL) {
base = nullptr;
index = nullptr;
if (oper != nullptr) {
// It has a unique memory operand. Find its index.
int oper_idx = num_opnds();
while (--oper_idx >= 0) {
@ -259,36 +259,36 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
Node* index;
const MachOper* oper = memory_inputs(base, index);
if (oper == NULL) {
// Base has been set to NULL
if (oper == nullptr) {
// Base has been set to null
offset = 0;
} else if (oper == (MachOper*)-1) {
// Base has been set to NodeSentinel
// There is not a unique memory use here. We will fall to AliasIdxBot.
offset = Type::OffsetBot;
} else {
// Base may be NULL, even if offset turns out to be != 0
// Base may be null, even if offset turns out to be != 0
intptr_t disp = oper->constant_disp();
int scale = oper->scale();
// Now we have collected every part of the ADLC MEMORY_INTER.
// See if it adds up to a base + offset.
if (index != NULL) {
if (index != nullptr) {
const Type* t_index = index->bottom_type();
if (t_index->isa_narrowoop() || t_index->isa_narrowklass()) { // EncodeN, LoadN, LoadConN, LoadNKlass,
// EncodeNKlass, LoadConNklass.
// Memory references through narrow oops have a
// funny base so grab the type from the index:
// [R12 + narrow_oop_reg<<3 + offset]
assert(base == NULL, "Memory references through narrow oops have no base");
assert(base == nullptr, "Memory references through narrow oops have no base");
offset = disp;
adr_type = t_index->make_ptr()->add_offset(offset);
return NULL;
return nullptr;
} else if (!index->is_Con()) {
disp = Type::OffsetBot;
} else if (disp != Type::OffsetBot) {
const TypeX* ti = t_index->isa_intptr_t();
if (ti == NULL) {
if (ti == nullptr) {
disp = Type::OffsetBot; // a random constant??
} else {
disp += ti->get_con() << scale;
@ -302,8 +302,8 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
// Lookup the TypePtr used by indOffset32X, a compile-time constant oop,
// Add the offset determined by the "base", or use Type::OffsetBot.
if( adr_type == TYPE_PTR_SENTINAL ) {
const TypePtr *t_disp = oper->disp_as_type(); // only !NULL for indOffset32X
if (t_disp != NULL) {
const TypePtr *t_disp = oper->disp_as_type(); // only not null for indOffset32X
if (t_disp != nullptr) {
offset = Type::OffsetBot;
const Type* t_base = base->bottom_type();
if (t_base->isa_intptr_t()) {
@ -313,10 +313,10 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
}
}
adr_type = t_disp->add_offset(offset);
} else if( base == NULL && offset != 0 && offset != Type::OffsetBot ) {
} else if( base == nullptr && offset != 0 && offset != Type::OffsetBot ) {
// Use ideal type if it is oop ptr.
const TypePtr *tp = oper->type()->isa_ptr();
if( tp != NULL) {
if( tp != nullptr) {
adr_type = tp;
}
}
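The "[R12 + narrow_oop_reg<<3 + offset]" form above is ordinary compressed-oop arithmetic. A standalone sketch of the decode (all values hypothetical; on x86_64 HotSpot keeps the heap base in R12):

#include <cstdint>

int main() {
  const uint64_t heap_base  = 0x0000000800000000ull; // base register (R12) value
  const uint32_t narrow_oop = 0x00123456u;           // 32-bit compressed reference
  const int      shift      = 3;                     // 8-byte object alignment
  const int      offset     = 16;                    // hypothetical field offset
  uint64_t addr = heap_base + (uint64_t(narrow_oop) << shift) + offset;
  (void)addr;  // full 64-bit address: base + (narrow << shift) + disp
  return 0;
}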
@ -341,12 +341,12 @@ const class TypePtr *MachNode::adr_type() const {
// %%%%% Someday we'd like to allow constant oop offsets which
// would let Intel load from static globals in 1 instruction.
// Currently Intel requires 2 instructions and a register temp.
if (base == NULL) {
// NULL base, zero offset means no memory at all (a null pointer!)
if (base == nullptr) {
// null base, zero offset means no memory at all (a null pointer!)
if (offset == 0) {
return NULL;
return nullptr;
}
// NULL base, any offset means any pointer whatever
// null base, any offset means any pointer whatever
if (offset == Type::OffsetBot) {
return TypePtr::BOTTOM;
}
@ -379,7 +379,7 @@ const class TypePtr *MachNode::adr_type() const {
const TypePtr *tp = t->isa_ptr();
// be conservative if we do not recognize the type
if (tp == NULL) {
if (tp == nullptr) {
assert(false, "this path may produce not optimal code");
return TypePtr::BOTTOM;
}
@ -508,7 +508,7 @@ bool MachNode::rematerialize() const {
void MachNode::dump_spec(outputStream *st) const {
uint cnt = num_opnds();
for( uint i=0; i<cnt; i++ ) {
if (_opnds[i] != NULL) {
if (_opnds[i] != nullptr) {
_opnds[i]->dump_spec(st);
} else {
st->print(" _");
@ -532,10 +532,10 @@ void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const {
//=============================================================================
#ifndef PRODUCT
void MachTypeNode::dump_spec(outputStream *st) const {
if (_bottom_type != NULL) {
if (_bottom_type != nullptr) {
_bottom_type->dump_on(st);
} else {
st->print(" NULL");
st->print(" null");
}
if (barrier_data() != 0) {
st->print(" barrier(");
@ -610,16 +610,16 @@ const TypePtr *MachProjNode::adr_type() const {
if (bottom_type() == Type::MEMORY) {
// in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM
Node* ctrl = in(0);
if (ctrl == NULL) return NULL; // node is dead
if (ctrl == nullptr) return nullptr; // node is dead
const TypePtr* adr_type = ctrl->adr_type();
#ifdef ASSERT
if (!VMError::is_error_reported() && !Node::in_dump())
assert(adr_type != NULL, "source must have adr_type");
assert(adr_type != nullptr, "source must have adr_type");
#endif
return adr_type;
}
assert(bottom_type()->base() != Type::Memory, "no other memories?");
return NULL;
return nullptr;
}
#ifndef PRODUCT
@ -683,9 +683,9 @@ const Type* MachCallNode::Value(PhaseGVN* phase) const { return tf()->range(); }
#ifndef PRODUCT
void MachCallNode::dump_spec(outputStream *st) const {
st->print("# ");
if (tf() != NULL) tf()->dump_on(st);
if (tf() != nullptr) tf()->dump_on(st);
if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
if (jvms() != NULL) jvms()->dump_spec(st);
if (jvms() != nullptr) jvms()->dump_spec(st);
}
#endif
@ -779,7 +779,7 @@ bool MachCallStaticJavaNode::cmp( const Node &n ) const {
//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int MachCallStaticJavaNode::uncommon_trap_request() const {
if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
if (_name != nullptr && !strcmp(_name, "uncommon_trap")) {
return CallStaticJavaNode::extract_uncommon_trap_request(this);
}
return 0;
@ -799,7 +799,7 @@ void MachCallStaticJavaNode::dump_trap_args(outputStream *st) const {
void MachCallStaticJavaNode::dump_spec(outputStream *st) const {
st->print("Static ");
if (_name != NULL) {
if (_name != nullptr) {
st->print("wrapper for: %s", _name );
dump_trap_args(st);
st->print(" ");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,7 +166,7 @@ public:
// Access the TypeKlassPtr of operands with a base==RegI and disp==RegP
// Only returns non-null value for x86_32.ad's indOffset32X
virtual const TypePtr *disp_as_type() const { return NULL; }
virtual const TypePtr *disp_as_type() const { return nullptr; }
// Return the label
virtual Label *label() const;
@ -199,7 +199,7 @@ public:
// Check whether o is a valid oper.
static bool notAnOper(const MachOper *o) {
if (o == NULL) return true;
if (o == nullptr) return true;
if (((intptr_t)o & 1) != 0) return true;
if (*(address*)o == badAddress) return true; // kill by Node::destruct
return false;
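A sketch of notAnOper() as the sanity guard it is built for (the loop borrows num_opnds() and _opnds from MachNode; illustrative, not from the patch):

  for (uint i = 0; i < num_opnds(); i++) {
    assert(!MachOper::notAnOper(_opnds[i]), "corrupt, freed, or tagged operand");
  }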
@ -212,7 +212,7 @@ public:
// ADLC inherit from this class.
class MachNode : public Node {
public:
MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(NULL) {
MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(nullptr) {
init_class_id(Class_Mach);
}
// Required boilerplate
@ -264,7 +264,7 @@ public:
virtual const RegMask &in_RegMask(uint) const;
// cisc-spillable instructions redefine for use by in_RegMask
virtual const RegMask *cisc_RegMask() const { return NULL; }
virtual const RegMask *cisc_RegMask() const { return nullptr; }
// If this instruction is a 2-address instruction, then return the
// index of the input which must match the output. Not necessary
@ -334,7 +334,7 @@ public:
}
// If this is a memory op, return the base pointer and fixed offset.
// If there are no such, return NULL. If there are multiple addresses
// If there are none, return null. If there are multiple addresses
// or the address is indeterminate (rare cases) then return (Node*)-1,
// which serves as node bottom.
// If the offset is not statically determined, set it to Type::OffsetBot.
@ -346,14 +346,14 @@ public:
// Helper for get_base_and_disp: find the base and index input nodes.
// Returns the MachOper as determined by memory_operand(), for use, if
// needed by the caller. If (MachOper *)-1 is returned, base and index
// are set to NodeSentinel. If (MachOper *) NULL is returned, base and
// index are set to NULL.
// are set to NodeSentinel. If null is returned, base and
// index are set to null.
const MachOper* memory_inputs(Node* &base, Node* &index) const;
// Helper for memory_inputs: Which operand carries the necessary info?
// By default, returns NULL, which means there is no such operand.
// By default, returns null, which means there is no such operand.
// If it returns (MachOper*)-1, this means there are multiple memories.
virtual const MachOper* memory_operand() const { return NULL; }
virtual const MachOper* memory_operand() const { return nullptr; }
// Call "get_base_and_disp" to decide which category of memory is used here.
virtual const class TypePtr *adr_type() const;
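A caller sketch decoding the three-way convention just documented (n is a hypothetical MachNode*; the constants follow the comments above):

  Node* base;
  Node* index;
  const MachOper* oper = n->memory_inputs(base, index);
  if (oper == nullptr) {
    // no memory operand: base and index came back as nullptr
  } else if (oper == (MachOper*)-1) {
    // multiple or indeterminate memories: base and index are NodeSentinel
  } else {
    // exactly one memory operand: decode base, index and displacement from it
  }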
@ -400,7 +400,7 @@ public:
// Define the following defaults for non-matched machine nodes
virtual uint oper_input_base() const { return 0; }
virtual uint rule() const { return 9999999; }
virtual const class Type *bottom_type() const { return _opnds == NULL ? Type::CONTROL : MachNode::bottom_type(); }
virtual const class Type *bottom_type() const { return _opnds == nullptr ? Type::CONTROL : MachNode::bottom_type(); }
};
//------------------------------MachTypeNode----------------------------
@ -600,7 +600,7 @@ public:
MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()), _spill_type(spill_type) {
init_class_id(Class_MachSpillCopy);
init_flags(Flag_is_Copy);
add_req(NULL);
add_req(nullptr);
add_req(n);
}
virtual uint size_of() const { return sizeof(*this); }
@ -668,7 +668,7 @@ class MachMergeNode : public MachIdealNode {
public:
MachMergeNode(Node *n1) {
init_class_id(Class_MachMerge);
add_req(NULL);
add_req(nullptr);
add_req(n1);
}
virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
@ -694,7 +694,7 @@ public:
virtual void save_label(Label** label, uint* block_num) = 0;
// Support for short branches
virtual MachNode *short_branch_version() { return NULL; }
virtual MachNode *short_branch_version() { return nullptr; }
virtual bool pinned() const { return true; };
};
@ -839,7 +839,7 @@ public:
OopMap* oop_map() const { return _oop_map; }
void set_oop_map(OopMap* om) { _oop_map = om; }
MachSafePointNode() : MachReturnNode(), _oop_map(NULL), _jvms(NULL), _jvmadj(0), _has_ea_local_in_scope(false) {
MachSafePointNode() : MachReturnNode(), _oop_map(nullptr), _jvms(nullptr), _jvmadj(0), _has_ea_local_in_scope(false) {
init_class_id(Class_MachSafePoint);
}
@ -951,11 +951,11 @@ public:
if (_override_symbolic_info) {
// Attach corresponding Method* to the call site, so VM can use it during resolution
// instead of querying symbolic info from bytecode.
assert(_method != NULL, "method should be set");
assert(_method != nullptr, "method should be set");
assert(_method->constant_encoding()->is_method(), "should point to a Method");
return cbuf.oop_recorder()->find_index(_method->constant_encoding());
}
return 0; // Use symbolic info from bytecode (resolved_method == NULL).
return 0; // Use symbolic info from bytecode (resolved_method is null).
}
#ifndef PRODUCT
@ -1005,7 +1005,7 @@ class MachCallRuntimeNode : public MachCallNode {
virtual bool cmp( const Node &n ) const;
virtual uint size_of() const; // Size is bigger
public:
const char *_name; // Printable name, if _method is NULL
const char *_name; // Printable name, if _method is null
bool _leaf_no_fp; // Is this CallLeafNoFP?
MachCallRuntimeNode() : MachCallNode() {
init_class_id(Class_MachCallRuntime);
@ -1064,7 +1064,7 @@ public:
init_class_id(Class_MachTemp);
_num_opnds = 1;
_opnds = _opnd_array;
add_req(NULL);
add_req(nullptr);
_opnds[0] = oper;
}
virtual uint size_of() const { return sizeof(MachTempNode); }
@ -1096,7 +1096,7 @@ public:
virtual MachOper *clone() const;
virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; }
virtual Label *label() const { assert(_label != nullptr, "need Label"); return _label; }
virtual uint opcode() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,7 @@ int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
else
use->set_prec(j, newref);
nreplacements++;
} else if (j >= req && uin == NULL) {
} else if (j >= req && uin == nullptr) {
break;
}
}
@ -83,7 +83,7 @@ int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
}
void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
assert(old != NULL, "sanity");
assert(old != nullptr, "sanity");
for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
Node* use = old->fast_out(i);
_igvn.rehash_node_delayed(use);
@ -143,9 +143,9 @@ CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* sl
// Slow path call has no side-effects, uses few values
copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2);
if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2);
call->copy_call_debug_info(&_igvn, oldcall);
call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
_igvn.replace_node(oldcall, call);
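The nullptr guards above make the trailing parameters optional. The lock expansion later in this same file exercises exactly that, passing nullptr for the leaf name and for parm2:

  CallNode* call = make_slow_call((CallNode*) lock, OptoRuntime::complete_monitor_enter_Type(),
                                  OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
                                  obj, box, nullptr);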
@ -190,9 +190,9 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
}
mem = in->in(TypeFunc::Memory);
} else if (in->is_MemBar()) {
ArrayCopyNode* ac = NULL;
ArrayCopyNode* ac = nullptr;
if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
if (ac != NULL) {
if (ac != nullptr) {
assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
return ac;
}
@ -230,7 +230,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
InitializeNode* init = alloc->as_Allocate()->initialization();
// We are looking for the stored value; return the Initialize node
// or the memory edge from the Allocate node.
if (init != NULL) {
if (init != nullptr) {
return init;
} else {
return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
@ -239,7 +239,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
// Otherwise skip it (the call updated 'mem' value).
} else if (mem->Opcode() == Op_SCMemProj) {
mem = mem->in(0);
Node* adr = NULL;
Node* adr = nullptr;
if (mem->is_LoadStore()) {
adr = mem->in(MemNode::Address);
} else {
@ -252,7 +252,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
if (adr_idx == alias_idx) {
DEBUG_ONLY(mem->dump();)
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return NULL;
return nullptr;
}
mem = mem->in(MemNode::Memory);
} else if (mem->Opcode() == Op_StrInflatedCopy) {
@ -262,7 +262,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
if (adr_idx == alias_idx) {
DEBUG_ONLY(mem->dump();)
assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
return NULL;
return nullptr;
}
mem = mem->in(MemNode::Memory);
} else {
@ -281,7 +281,7 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset,
bt = T_OBJECT;
type = ftype->make_oopptr();
}
Node* res = NULL;
Node* res = nullptr;
if (ac->is_clonebasic()) {
assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
Node* base = ac->in(ArrayCopyNode::Src);
@ -299,8 +299,8 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset,
const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
Node* adr = NULL;
const TypePtr* adr_type = NULL;
Node* adr = nullptr;
const TypePtr* adr_type = nullptr;
if (src_pos_t->is_con() && dest_pos_t->is_con()) {
intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
Node* base = ac->in(ArrayCopyNode::Src);
@ -324,7 +324,7 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset,
if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
// Non constant offset in the array: we can't statically
// determine the value
return NULL;
return nullptr;
}
}
MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
@ -332,21 +332,21 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset,
res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
}
}
if (res != NULL) {
if (res != nullptr) {
if (ftype->isa_narrowoop()) {
// PhaseMacroExpand::scalar_replacement adds DecodeN nodes
res = _igvn.transform(new EncodePNode(res, ftype));
}
return res;
}
return NULL;
return nullptr;
}
//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive, its depth is limited by the "level" argument
// Returns the computed Phi, or NULL if it cannot compute it.
// Returns the computed Phi, or null if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
assert(mem->is_Phi(), "sanity");
int alias_idx = C->get_alias_index(adr_t);
@ -364,26 +364,26 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
}
// Check if an appropriate new value phi already exists.
Node* new_phi = value_phis->find(mem->_idx);
if (new_phi != NULL)
if (new_phi != nullptr)
return new_phi;
if (level <= 0) {
return NULL; // Give up: phi tree too deep
return nullptr; // Give up: phi tree too deep
}
Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
Node *alloc_mem = alloc->in(TypeFunc::Memory);
uint length = mem->req();
GrowableArray <Node *> values(length, length, NULL);
GrowableArray <Node *> values(length, length, nullptr);
// create a new Phi for the value
PhiNode *phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset);
transform_later(phi);
value_phis->push(phi, mem->_idx);
for (uint j = 1; j < length; j++) {
Node *in = mem->in(j);
if (in == NULL || in->is_top()) {
if (in == nullptr || in->is_top()) {
values.at_put(j, in);
} else {
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
@ -395,8 +395,8 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
if (val->is_Initialize()) {
val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
}
if (val == NULL) {
return NULL; // can't find a value on this path
if (val == nullptr) {
return nullptr; // can't find a value on this path
}
if (val == mem) {
values.at_put(j, mem);
@ -412,8 +412,8 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
values.at_put(j, _igvn.zerocon(ft));
} else if (val->is_Phi()) {
val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
if (val == NULL) {
return NULL;
if (val == nullptr) {
return nullptr;
}
values.at_put(j, val);
} else if (val->Opcode() == Op_SCMemProj) {
@ -421,17 +421,17 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
val->in(0)->Opcode() == Op_EncodeISOArray ||
val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return NULL;
return nullptr;
} else if (val->is_ArrayCopy()) {
Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
if (res == NULL) {
return NULL;
if (res == nullptr) {
return nullptr;
}
values.at_put(j, res);
} else {
DEBUG_ONLY( val->dump(); )
assert(false, "unknown node on this path");
return NULL; // unknown node on this path
return nullptr; // unknown node on this path
}
}
}
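An illustrative, self-contained analogue of the discipline used above (not HotSpot code): memoize per-node results, spend a depth budget, and report failure as nullptr so callers can unwind:

#include <initializer_list>
#include <unordered_map>

struct Node_ { int id; Node_* left; Node_* right; };

static Node_* resolve(Node_* n, std::unordered_map<int, Node_*>& memo, int level) {
  auto hit = memo.find(n->id);
  if (hit != memo.end()) return hit->second;  // reuse a previously computed result
  if (level <= 0) return nullptr;             // give up: tree too deep
  memo[n->id] = n;                            // register before recursing to break cycles
  for (Node_* in : {n->left, n->right}) {
    if (in != nullptr && resolve(in, memo, level - 1) == nullptr) {
      return nullptr;                         // a path failed; the caller unwinds
    }
  }
  return n;
}

int main() {
  Node_ leaf{2, nullptr, nullptr};
  Node_ root{1, &leaf, &leaf};
  std::unordered_map<int, Node_*> memo;
  return resolve(&root, memo, 4) != nullptr ? 0 : 1;
}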
@ -463,14 +463,14 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType
Node *mem = sfpt_mem;
while (!done) {
if (visited.test_set(mem->_idx)) {
return NULL; // found a loop, give up
return nullptr; // found a loop, give up
}
mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
if (mem == start_mem || mem == alloc_mem) {
done = true; // hit a sentinel, return appropriate 0 value
} else if (mem->is_Initialize()) {
mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
if (mem == NULL) {
if (mem == nullptr) {
done = true; // Something went wrong.
} else if (mem->is_Store()) {
const TypePtr* atype = mem->as_Store()->adr_type();
@ -479,27 +479,27 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType
}
} else if (mem->is_Store()) {
const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
assert(atype != NULL, "address type must be oopptr");
assert(atype != nullptr, "address type must be oopptr");
assert(C->get_alias_index(atype) == alias_idx &&
atype->is_known_instance_field() && atype->offset() == offset &&
atype->instance_id() == instance_id, "store is correct memory slice");
done = true;
} else if (mem->is_Phi()) {
// try to find a phi's unique input
Node *unique_input = NULL;
Node *unique_input = nullptr;
Node *top = C->top();
for (uint i = 1; i < mem->req(); i++) {
Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
if (n == NULL || n == top || n == mem) {
if (n == nullptr || n == top || n == mem) {
continue;
} else if (unique_input == NULL) {
} else if (unique_input == nullptr) {
unique_input = n;
} else if (unique_input != n) {
unique_input = top;
break;
}
}
if (unique_input != NULL && unique_input != top) {
if (unique_input != nullptr && unique_input != top) {
mem = unique_input;
} else {
done = true;
@ -511,7 +511,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType
assert(false, "unexpected node");
}
}
if (mem != NULL) {
if (mem != nullptr) {
if (mem == start_mem || mem == alloc_mem) {
// hit a sentinel, return appropriate 0 value
return _igvn.zerocon(ft);
@ -524,7 +524,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType
// attempt to produce a Phi reflecting the values on the input paths of the Phi
Node_Stack value_phis(8);
Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
if (phi != NULL) {
if (phi != nullptr) {
return phi;
} else {
// Kill all new Phis
@ -546,27 +546,27 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType
}
}
// Something went wrong.
return NULL;
return nullptr;
}
// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
// Scan the uses of the allocation to check for anything that would
// prevent us from eliminating it.
NOT_PRODUCT( const char* fail_eliminate = NULL; )
DEBUG_ONLY( Node* disq_node = NULL; )
NOT_PRODUCT( const char* fail_eliminate = nullptr; )
DEBUG_ONLY( Node* disq_node = nullptr; )
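// For reference (per HotSpot's utilities/macros.hpp): NOT_PRODUCT(stmt)
// compiles stmt out of product builds, and DEBUG_ONLY(stmt) keeps stmt only
// when ASSERT is defined -- so fail_eliminate and disq_node are pure
// diagnostics and vanish from product binaries.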
bool can_eliminate = true;
Node* res = alloc->result_cast();
const TypeOopPtr* res_type = NULL;
if (res == NULL) {
const TypeOopPtr* res_type = nullptr;
if (res == nullptr) {
// All users were eliminated.
} else if (!res->is_CheckCastPP()) {
NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
can_eliminate = false;
} else {
res_type = _igvn.type(res)->isa_oopptr();
if (res_type == NULL) {
if (res_type == nullptr) {
NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";)
can_eliminate = false;
} else if (res_type->isa_aryptr()) {
@ -578,7 +578,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
}
}
if (can_eliminate && res != NULL) {
if (can_eliminate && res != nullptr) {
BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
j < jmax && can_eliminate; j++) {
@ -622,9 +622,9 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
can_eliminate = false;
}
Node* sfptMem = sfpt->memory();
if (sfptMem == NULL || sfptMem->is_top()) {
if (sfptMem == nullptr || sfptMem->is_top()) {
DEBUG_ONLY(disq_node = use;)
NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
NOT_PRODUCT(fail_eliminate = "null or TOP memory";)
can_eliminate = false;
} else {
safepoints.append_if_missing(sfpt);
@ -654,18 +654,18 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
if (PrintEliminateAllocations) {
if (can_eliminate) {
tty->print("Scalar ");
if (res == NULL)
if (res == nullptr)
alloc->dump();
else
res->dump();
} else if (alloc->_is_scalar_replaceable) {
tty->print("NotScalar (%s)", fail_eliminate);
if (res == NULL)
if (res == nullptr)
alloc->dump();
else
res->dump();
#ifdef ASSERT
if (disq_node != NULL) {
if (disq_node != nullptr) {
tty->print(" >>>> ");
disq_node->dump();
}
@ -680,21 +680,21 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
GrowableArray <SafePointNode *> safepoints_done;
ciInstanceKlass* iklass = NULL;
ciInstanceKlass* iklass = nullptr;
int nfields = 0;
int array_base = 0;
int element_size = 0;
BasicType basic_elem_type = T_ILLEGAL;
const Type* field_type = NULL;
const Type* field_type = nullptr;
Node* res = alloc->result_cast();
assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
const TypeOopPtr* res_type = NULL;
if (res != NULL) { // Could be NULL when there are no users
assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result");
const TypeOopPtr* res_type = nullptr;
if (res != nullptr) { // Could be null when there are no users
res_type = _igvn.type(res)->isa_oopptr();
}
if (res != NULL) {
if (res != nullptr) {
if (res_type->isa_instptr()) {
// find the fields of the class which will be needed for safepoint debug information
iklass = res_type->is_instptr()->instance_klass();
@ -716,7 +716,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
SafePointNode* sfpt = safepoints.pop();
Node* mem = sfpt->memory();
Node* ctl = sfpt->control();
assert(sfpt->jvms() != NULL, "missed JVMS");
assert(sfpt->jvms() != nullptr, "missed JVMS");
// Fields of scalar objs are referenced only at the end
// of regular debuginfo at the last (youngest) JVMS.
// Record relative start index.
@ -732,8 +732,8 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
// Scan object's fields adding an input to the safepoint for each field.
for (int j = 0; j < nfields; j++) {
intptr_t offset;
ciField* field = NULL;
if (iklass != NULL) {
ciField* field = nullptr;
if (iklass != nullptr) {
field = iklass->nonstatic_field_at(j);
offset = field->offset();
ciType* elem_type = field->type();
@ -743,12 +743,12 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
if (is_reference_type(basic_elem_type)) {
if (!elem_type->is_loaded()) {
field_type = TypeInstPtr::BOTTOM;
} else if (field != NULL && field->is_static_constant()) {
} else if (field != nullptr && field->is_static_constant()) {
ciObject* con = field->constant_value().as_object();
// Do not "join" in the previous type; it doesn't add value,
// and may yield a vacuous result if the field is of interface type.
field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
assert(field_type != NULL, "field singleton type must be consistent");
assert(field_type != nullptr, "field singleton type must be consistent");
} else {
field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
}
@ -766,7 +766,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
Node *field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);
if (field_val == NULL) {
if (field_val == nullptr) {
// We weren't able to find a value for this field,
// give up on eliminating this allocation.
@ -804,7 +804,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
}
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (field != NULL) {
if (field != nullptr) {
tty->print("=== At SafePoint node %d can't find value of Field: ",
sfpt->_idx);
field->print();
@ -815,7 +815,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
sfpt->_idx, j);
}
tty->print(", which prevents elimination of: ");
if (res == NULL)
if (res == nullptr)
alloc->dump();
else
res->dump();
@ -850,10 +850,10 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);
if (ctl_proj != NULL) {
if (ctl_proj != nullptr) {
igvn.replace_node(ctl_proj, n->in(0));
}
if (mem_proj != NULL) {
if (mem_proj != nullptr) {
igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
}
}
@ -861,7 +861,7 @@ static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
Node* res = alloc->result_cast();
if (res != NULL) {
if (res != nullptr) {
for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
Node *use = res->last_out(j);
uint oc1 = res->outcnt();
@ -940,7 +940,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
//
// Process other users of allocation's projections
//
if (_callprojs.resproj != NULL && _callprojs.resproj->outcnt() != 0) {
if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) {
// First disconnect stores captured by Initialize node.
// If Initialize node is eliminated first in the following code,
// it will kill such stores and DUIterator_Last will assert.
@ -960,16 +960,16 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
InitializeNode *init = use->as_Initialize();
assert(init->outcnt() <= 2, "only a control and memory projection expected");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != NULL) {
if (ctrl_proj != nullptr) {
_igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
// If the InitializeNode has no memory out, it will die, and tmp will become NULL
// If the InitializeNode has no memory out, it will die, and tmp will become null
Node* tmp = init->in(TypeFunc::Control);
assert(tmp == NULL || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
#endif
}
Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (mem_proj != NULL) {
if (mem_proj != nullptr) {
Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
if (mem->is_MergeMem()) {
@ -986,22 +986,22 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
j -= (oc1 - _callprojs.resproj->outcnt());
}
}
if (_callprojs.fallthrough_catchproj != NULL) {
if (_callprojs.fallthrough_catchproj != nullptr) {
_igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
}
if (_callprojs.fallthrough_memproj != NULL) {
if (_callprojs.fallthrough_memproj != nullptr) {
_igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
}
if (_callprojs.catchall_memproj != NULL) {
if (_callprojs.catchall_memproj != nullptr) {
_igvn.replace_node(_callprojs.catchall_memproj, C->top());
}
if (_callprojs.fallthrough_ioproj != NULL) {
if (_callprojs.fallthrough_ioproj != nullptr) {
_igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
}
if (_callprojs.catchall_ioproj != NULL) {
if (_callprojs.catchall_ioproj != nullptr) {
_igvn.replace_node(_callprojs.catchall_ioproj, C->top());
}
if (_callprojs.catchall_catchproj != NULL) {
if (_callprojs.catchall_catchproj != nullptr) {
_igvn.replace_node(_callprojs.catchall_catchproj, C->top());
}
}
@ -1023,7 +1023,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
bool boxing_alloc = C->eliminate_boxing() &&
tklass->isa_instklassptr() &&
tklass->is_instklassptr()->instance_klass()->is_box_klass();
if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) {
return false;
}
@ -1035,7 +1035,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
}
if (!alloc->_is_scalar_replaceable) {
assert(res == NULL, "sanity");
assert(res == nullptr, "sanity");
// We can only eliminate allocation if all debug info references
// are already replaced with SafePointScalarObject because
// we can't search for a field's value without instance_id.
@ -1049,11 +1049,11 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
}
CompileLog* log = C->log();
if (log != NULL) {
if (log != nullptr) {
log->head("eliminate_allocation type='%d'",
log->identify(tklass->exact_klass()));
JVMState* p = alloc->jvms();
while (p != NULL) {
while (p != nullptr) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
@ -1076,25 +1076,25 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
// EA should remove all uses of non-escaping boxing node.
if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) {
return false;
}
assert(boxing->result_cast() == NULL, "unexpected boxing node result");
assert(boxing->result_cast() == nullptr, "unexpected boxing node result");
boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
const TypeTuple* r = boxing->tf()->range();
assert(r->cnt() > TypeFunc::Parms, "sanity");
const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
assert(t != NULL, "sanity");
assert(t != nullptr, "sanity");
CompileLog* log = C->log();
if (log != NULL) {
if (log != nullptr) {
log->head("eliminate_boxing type='%d'",
log->identify(t->instance_klass()));
JVMState* p = boxing->jvms();
while (p != NULL) {
while (p != nullptr) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
@ -1126,7 +1126,7 @@ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset,
Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
Node* adr = basic_plus_adr(base, offset);
mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt, MemNode::unordered);
mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered);
transform_later(mem);
return mem;
}
@ -1199,15 +1199,15 @@ void PhaseMacroExpand::expand_allocate_common(
Node* size_in_bytes = alloc->in(AllocateNode::AllocSize);
Node* klass_node = alloc->in(AllocateNode::KlassNode);
Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
assert(ctrl != NULL, "must have control");
assert(ctrl != nullptr, "must have control");
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// they will not be used if "always_slow" is set
enum { slow_result_path = 1, fast_result_path = 2 };
Node *result_region = NULL;
Node *result_phi_rawmem = NULL;
Node *result_phi_rawoop = NULL;
Node *result_phi_i_o = NULL;
Node *result_region = nullptr;
Node *result_phi_rawmem = nullptr;
Node *result_phi_rawoop = nullptr;
Node *result_phi_i_o = nullptr;
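// Shape of the merge prepared here (an illustrative sketch; the elided code
// below creates and fills these): a three-input Region whose path 1 carries
// the slow result and path 2 the fast result, with one Phi per value kind.
//
//   result_region     = new RegionNode(3);
//   result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
//   result_phi_rawoop->init_req(fast_result_path, fast_oop);
//   result_phi_rawoop->init_req(slow_result_path, /* slow-path oop */ ...);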
// The initial slow comparison is a size check, the comparison
// we want to do is a BoolTest::gt
@ -1219,7 +1219,7 @@ void PhaseMacroExpand::expand_allocate_common(
// 1 - always too big or negative
assert(tv <= 1, "0 or 1 if a constant");
expand_fast_path = (tv == 0);
initial_slow_test = NULL;
initial_slow_test = nullptr;
} else {
initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
}
@ -1227,16 +1227,16 @@ void PhaseMacroExpand::expand_allocate_common(
if (!UseTLAB) {
// Force slow-path allocation
expand_fast_path = false;
initial_slow_test = NULL;
initial_slow_test = nullptr;
}
bool allocation_has_use = (alloc->result_cast() != NULL);
bool allocation_has_use = (alloc->result_cast() != nullptr);
if (!allocation_has_use) {
InitializeNode* init = alloc->initialization();
if (init != NULL) {
if (init != nullptr) {
init->remove(&_igvn);
}
if (expand_fast_path && (initial_slow_test == NULL)) {
if (expand_fast_path && (initial_slow_test == nullptr)) {
// Remove allocation node and return.
// Size is a non-negative constant -> no initial check needed -> directly to fast path.
// Also, no usages -> empty fast path -> no fall out to slow path -> nothing left.
@ -1244,7 +1244,7 @@ void PhaseMacroExpand::expand_allocate_common(
if (PrintEliminateAllocations) {
tty->print("NotUsed ");
Node* res = alloc->proj_out_or_null(TypeFunc::Parms);
if (res != NULL) {
if (res != nullptr) {
res->dump();
} else {
alloc->dump();
@ -1257,11 +1257,11 @@ void PhaseMacroExpand::expand_allocate_common(
}
enum { too_big_or_final_path = 1, need_gc_path = 2 };
Node *slow_region = NULL;
Node *slow_region = nullptr;
Node *toobig_false = ctrl;
// generate the initial test if necessary
if (initial_slow_test != NULL ) {
if (initial_slow_test != nullptr ) {
assert (expand_fast_path, "Only need test if there is a fast path");
slow_region = new RegionNode(3);
@ -1314,16 +1314,16 @@ void PhaseMacroExpand::expand_allocate_common(
Node* fast_oop_ctrl;
Node* fast_oop_rawmem;
if (allocation_has_use) {
Node* needgc_ctrl = NULL;
Node* needgc_ctrl = nullptr;
result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
intx prefetch_lines = length != nullptr ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
fast_oop_ctrl, fast_oop_rawmem,
prefetch_lines);
if (initial_slow_test != NULL) {
if (initial_slow_test != nullptr) {
// This completes all paths into the slow merge point
slow_region->init_req(need_gc_path, needgc_ctrl);
transform_later(slow_region);
@ -1342,7 +1342,7 @@ void PhaseMacroExpand::expand_allocate_common(
result_phi_rawoop->init_req(fast_result_path, fast_oop);
} else {
assert (initial_slow_test != NULL, "sanity");
assert (initial_slow_test != nullptr, "sanity");
fast_oop_ctrl = toobig_false;
fast_oop_rawmem = mem;
transform_later(slow_region);
@ -1368,7 +1368,7 @@ void PhaseMacroExpand::expand_allocate_common(
call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
call->init_req(TypeFunc::Parms+0, klass_node);
if (length != NULL) {
if (length != nullptr) {
call->init_req(TypeFunc::Parms+1, length);
}
@ -1378,7 +1378,7 @@ void PhaseMacroExpand::expand_allocate_common(
// For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify
// that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough
// path dies).
if (valid_length_test != NULL) {
if (valid_length_test != nullptr) {
call->add_req(valid_length_test);
}
if (expand_fast_path) {
@ -1407,13 +1407,13 @@ void PhaseMacroExpand::expand_allocate_common(
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call when
// both memory projections are combined)
if (expand_fast_path && _callprojs.fallthrough_memproj != NULL) {
if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) {
migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
}
// Now change uses of catchall_memproj to use fallthrough_memproj and delete
// catchall_memproj so we end up with a call that has only 1 memory projection.
if (_callprojs.catchall_memproj != NULL ) {
if (_callprojs.fallthrough_memproj == NULL) {
if (_callprojs.catchall_memproj != nullptr ) {
if (_callprojs.fallthrough_memproj == nullptr) {
_callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
transform_later(_callprojs.fallthrough_memproj);
}
@ -1426,13 +1426,13 @@ void PhaseMacroExpand::expand_allocate_common(
// otherwise incoming i_o become dead when only a slow call is generated
// (it is different from memory projections where both projections are
// combined in such case).
if (_callprojs.fallthrough_ioproj != NULL) {
if (_callprojs.fallthrough_ioproj != nullptr) {
migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
}
// Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
// catchall_ioproj so we end up with a call that has only 1 i_o projection.
if (_callprojs.catchall_ioproj != NULL ) {
if (_callprojs.fallthrough_ioproj == NULL) {
if (_callprojs.catchall_ioproj != nullptr ) {
if (_callprojs.fallthrough_ioproj == nullptr) {
_callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
transform_later(_callprojs.fallthrough_ioproj);
}
@ -1456,7 +1456,7 @@ void PhaseMacroExpand::expand_allocate_common(
return;
}
if (_callprojs.fallthrough_catchproj != NULL) {
if (_callprojs.fallthrough_catchproj != nullptr) {
ctrl = _callprojs.fallthrough_catchproj->clone();
transform_later(ctrl);
_igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
@ -1464,7 +1464,7 @@ void PhaseMacroExpand::expand_allocate_common(
ctrl = top();
}
Node *slow_result;
if (_callprojs.resproj == NULL) {
if (_callprojs.resproj == nullptr) {
// no uses of the allocation result
slow_result = top();
} else {
@ -1493,7 +1493,7 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
Node* i_o = alloc->in(TypeFunc::I_O);
alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
if (_callprojs.resproj != NULL) {
if (_callprojs.resproj != nullptr) {
for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
Node* use = _callprojs.resproj->fast_out(i);
use->isa_MemBar()->remove(&_igvn);
@ -1503,32 +1503,32 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
_igvn.remove_dead_node(_callprojs.resproj);
}
if (_callprojs.fallthrough_catchproj != NULL) {
if (_callprojs.fallthrough_catchproj != nullptr) {
migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
_igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
}
if (_callprojs.catchall_catchproj != NULL) {
if (_callprojs.catchall_catchproj != nullptr) {
_igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
_callprojs.catchall_catchproj->set_req(0, top());
}
if (_callprojs.fallthrough_proj != NULL) {
if (_callprojs.fallthrough_proj != nullptr) {
Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
_igvn.remove_dead_node(catchnode);
_igvn.remove_dead_node(_callprojs.fallthrough_proj);
}
if (_callprojs.fallthrough_memproj != NULL) {
if (_callprojs.fallthrough_memproj != nullptr) {
migrate_outs(_callprojs.fallthrough_memproj, mem);
_igvn.remove_dead_node(_callprojs.fallthrough_memproj);
}
if (_callprojs.fallthrough_ioproj != NULL) {
if (_callprojs.fallthrough_ioproj != nullptr) {
migrate_outs(_callprojs.fallthrough_ioproj, i_o);
_igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
}
if (_callprojs.catchall_memproj != NULL) {
if (_callprojs.catchall_memproj != nullptr) {
_igvn.rehash_node_delayed(_callprojs.catchall_memproj);
_callprojs.catchall_memproj->set_req(0, top());
}
if (_callprojs.catchall_ioproj != NULL) {
if (_callprojs.catchall_ioproj != nullptr) {
_igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
_callprojs.catchall_ioproj->set_req(0, top());
}
@ -1562,8 +1562,8 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN
// not escape.
if (!alloc->does_not_escape_thread() &&
!alloc->is_allocation_MemBar_redundant() &&
(init == NULL || !init->is_complete_with_arraycopy())) {
if (init == NULL || init->req() < InitializeNode::RawStores) {
(init == nullptr || !init->is_complete_with_arraycopy())) {
if (init == nullptr || init->req() < InitializeNode::RawStores) {
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object
// initialization.
@ -1606,10 +1606,10 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN
// All nodes that depended on the InitializeNode for control
// and memory must now depend on the MemBarNode that itself
// depends on the InitializeNode
if (init_ctrl != NULL) {
if (init_ctrl != nullptr) {
_igvn.replace_node(init_ctrl, ctrl);
}
if (init_mem != NULL) {
if (init_mem != nullptr) {
_igvn.replace_node(init_mem, mem);
}
}
@ -1665,7 +1665,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
int header_size = alloc->minimum_header_size(); // conservatively small
// Array length
if (length != NULL) { // Arrays need length field
if (length != nullptr) { // Arrays need length field
rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
// conservatively small header size:
header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
@ -1679,7 +1679,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
}
// Clear the object body, if necessary.
if (init == NULL) {
if (init == nullptr) {
// The init has somehow disappeared; be cautious and clear everything.
//
// This can happen if a node is allocated but an uncommon trap occurs
@ -1865,9 +1865,9 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
expand_allocate_common(alloc, NULL,
expand_allocate_common(alloc, nullptr,
OptoRuntime::new_instance_Type(),
OptoRuntime::new_instance_Java(), NULL);
OptoRuntime::new_instance_Java(), nullptr);
}
void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
@ -1877,8 +1877,8 @@ void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
Node* klass_node = alloc->in(AllocateNode::KlassNode);
const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr();
address slow_call_address; // Address of slow call
if (init != NULL && init->is_complete_with_arraycopy() &&
ary_klass_t && ary_klass_t->elem()->isa_klassptr() == NULL) {
if (init != nullptr && init->is_complete_with_arraycopy() &&
ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) {
// Don't zero type array during slow allocation in VM since
// it will be initialized later by arraycopy in compiled code.
slow_call_address = OptoRuntime::new_array_nozero_Java();
@ -1911,7 +1911,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
// eliminated even if different objects are referenced in one locked region
// (for example, OSR compilation of nested loop inside locked scope).
if (EliminateNestedLocks ||
oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj, NULL)) {
oldbox->as_BoxLock()->is_simple_lock_region(nullptr, obj, nullptr)) {
// Box is used only in one lock region. Mark this box as eliminated.
_igvn.hash_delete(oldbox);
oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value
@ -2003,7 +2003,7 @@ void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
} else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
// Only Lock node has JVMState needed here.
// Not that the preceding claim is documented anywhere else.
if (alock->jvms() != NULL) {
if (alock->jvms() != nullptr) {
if (alock->as_Lock()->is_nested_lock_region()) {
// Mark eliminated related nested locks and unlocks.
Node* obj = alock->obj_node();
@ -2031,7 +2031,7 @@ void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
} else {
#ifdef ASSERT
alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region");
if (C->log() != NULL)
if (C->log() != nullptr)
alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output
#endif
}
@ -2089,14 +2089,14 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
Node* mem = alock->in(TypeFunc::Memory);
Node* ctrl = alock->in(TypeFunc::Control);
guarantee(ctrl != NULL, "missing control projection, cannot replace_node() with NULL");
guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null");
alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
// There are 2 projections from the lock. The lock node will
// be deleted when its last use is subsumed below.
assert(alock->outcnt() == 2 &&
_callprojs.fallthrough_proj != NULL &&
_callprojs.fallthrough_memproj != NULL,
_callprojs.fallthrough_proj != nullptr &&
_callprojs.fallthrough_memproj != nullptr,
"Unexpected projections from Lock/Unlock");
Node* fallthroughproj = _callprojs.fallthrough_proj;
@ -2108,7 +2108,7 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
if (alock->is_Lock()) {
// Search for MemBarAcquireLock node and delete it also.
MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, "");
assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, "");
Node* ctrlproj = membar->proj_out(TypeFunc::Control);
Node* memproj = membar->proj_out(TypeFunc::Memory);
_igvn.replace_node(ctrlproj, fallthroughproj);
@ -2168,8 +2168,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
// Make slow path call
CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
obj, box, NULL);
OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path,
obj, box, nullptr);
call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
@ -2177,8 +2177,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
// de-opted. So the compiler thinks the slow-call can never throw an
// exception. If it DOES throw an exception we would need the debug
// info removed first (since if it throws there is no monitor).
assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL &&
_callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock");
assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
_callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
// Capture slow path
// disconnect fall-through projection from call and create a new one
@ -2232,8 +2232,8 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
"complete_monitor_unlocking_C", slow_path, obj, box, thread);
call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL &&
_callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock");
assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr &&
_callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock");
// No exceptions for unlocking
// Capture slow path
@ -2257,7 +2257,7 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
}
void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
assert(check->in(SubTypeCheckNode::Control) == NULL, "should be pinned");
assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned");
Node* bol = check->unique_out();
Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
@ -2276,15 +2276,15 @@ void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
Node* iffalse = iff->as_If()->proj_out(0);
Node* ctrl = iff->in(0);
Node* subklass = NULL;
Node* subklass = nullptr;
if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
subklass = obj_or_subklass;
} else {
Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
subklass = _igvn.transform(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
}
Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, NULL, _igvn);
Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn);
_igvn.replace_input_of(iff, 0, C->top());
_igvn.replace_node(iftrue, not_subtype_ctrl);
@ -2429,7 +2429,7 @@ bool PhaseMacroExpand::expand_macro_nodes() {
(bol->_test._test == BoolTest::ne), "");
IfNode* ifn = bol->unique_out()->as_If();
assert((ifn->outcnt() == 2) &&
ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, "");
ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != nullptr, "");
#endif
Node* repl = n->in(1);
if (!_has_locks) {
@ -2486,7 +2486,7 @@ bool PhaseMacroExpand::expand_macro_nodes() {
int macro_count = C->macro_count();
Node * n = C->macro_node(macro_count-1);
assert(n->is_macro(), "only macro nodes expected here");
if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) {
// node is unreachable, so don't try to expand it
C->remove_macro_node(n);
continue;
@ -2539,7 +2539,7 @@ bool PhaseMacroExpand::expand_macro_nodes() {
int macro_count = C->macro_count();
Node * n = C->macro_node(macro_count-1);
assert(n->is_macro(), "only macro nodes expected here");
if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) {
// node is unreachable, so don't try to expand it
C->remove_macro_node(n);
continue;
@ -2594,7 +2594,7 @@ int PhaseMacroExpand::count_MemBar(Compile *C) {
}
Unique_Node_List ideal_nodes;
int total = 0;
ideal_nodes.map(C->live_nodes(), NULL);
ideal_nodes.map(C->live_nodes(), nullptr);
ideal_nodes.push(C->root());
for (uint next = 0; next < ideal_nodes.size(); ++next) {
Node* n = ideal_nodes.at(next);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,10 +67,10 @@ public:
const TypeFunc* call_type, address call_addr,
const char* call_name,
const TypePtr* adr_type,
Node* parm0 = NULL, Node* parm1 = NULL,
Node* parm2 = NULL, Node* parm3 = NULL,
Node* parm4 = NULL, Node* parm5 = NULL,
Node* parm6 = NULL, Node* parm7 = NULL);
Node* parm0 = nullptr, Node* parm1 = nullptr,
Node* parm2 = nullptr, Node* parm3 = nullptr,
Node* parm4 = nullptr, Node* parm5 = nullptr,
Node* parm6 = nullptr, Node* parm7 = nullptr);
address basictype2arraycopy(BasicType t,
Node* src_offset,
@ -111,7 +111,7 @@ private:
void expand_unlock_node(UnlockNode *unlock);
// More helper methods modeled after GraphKit for array copy
void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = NULL);
void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = nullptr);
Node* array_element_address(Node* ary, Node* idx, BasicType elembt);
Node* ConvI2L(Node* offset);
@ -139,7 +139,7 @@ private:
Node* copy_length,
bool disjoint_bases = false,
bool length_never_negative = false,
RegionNode* slow_region = NULL);
RegionNode* slow_region = nullptr);
void generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
const TypePtr* adr_type,
Node* dest,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,17 +83,17 @@ Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
call->init_req(TypeFunc::ReturnAdr, top());
call->init_req(TypeFunc::FramePtr, top());
// Hook each parm in order. Stop looking at the first NULL.
if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
// Hook each parm in order. Stop looking at the first null.
if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2);
if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3);
if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4);
if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5);
if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6);
if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7);
/* close each nested if ===> */ } } } } } } } }
assert(call->in(call->req()-1) != NULL, "must initialize all parms");
assert(call->in(call->req()-1) != nullptr, "must initialize all parms");
return call;
}
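
The nested ifs above are deliberate: each check opens a scope containing all later parms, so hooking stops at the first null and the single closing assert on the last input suffices. A minimal standalone sketch of the same idiom, where FakeCall, hook_parms and nslots are hypothetical stand-ins rather than HotSpot types:

    #include <cassert>
    #include <cstddef>

    struct FakeCall {                       // hypothetical stand-in, not CallNode
      void*  slot[8] = {};
      size_t nslots;                        // plays the role of req() - Parms
      explicit FakeCall(size_t n) : nslots(n) {}
      void init_req(size_t i, void* p) { slot[i] = p; }
    };

    void hook_parms(FakeCall* call,
                    void* p0 = nullptr, void* p1 = nullptr,
                    void* p2 = nullptr, void* p3 = nullptr) {
      // Hook each parm in order; stop looking at the first null.
      if (p0 != nullptr) { call->init_req(0, p0);
      if (p1 != nullptr) { call->init_req(1, p1);
      if (p2 != nullptr) { call->init_req(2, p2);
      if (p3 != nullptr) { call->init_req(3, p3);
      /* close each nested if ===> */ } } } }
      // Nesting means a null parm cuts off all later ones, so if the call was
      // sized for nslots parms the last slot must have been filled.
      assert(call->nslots == 0 || call->slot[call->nslots - 1] != nullptr);
    }

    int main() {
      int a = 1, b = 2;
      FakeCall call(2);
      hook_parms(&call, &a, &b);            // fills slots 0 and 1, stops at p2
      return 0;
    }
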
@ -106,19 +106,19 @@ Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
if ((*ctrl)->is_top()) {
// Already short circuited.
return NULL;
return nullptr;
}
// Build an if node and its projections.
// If test is true we take the slow path, which we assume is uncommon.
if (_igvn.type(test) == TypeInt::ZERO) {
// The slow branch is never taken. No need to build this guard.
return NULL;
return nullptr;
}
IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
@ -127,7 +127,7 @@ Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* regi
Node* if_slow = new IfTrueNode(iff);
transform_later(if_slow);
if (region != NULL) {
if (region != nullptr) {
region->add_req(if_slow);
}
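
A standalone sketch of that contract: the helper hands back the slow-path control, or nullptr when the slow branch is statically impossible, and the caller must be ready for both outcomes (Ctl and generate_guard_sketch are hypothetical, not HotSpot types):

    #include <cstdio>

    struct Ctl { const char* name; };      // hypothetical stand-in for a control node

    Ctl* generate_guard_sketch(bool test_statically_false, Ctl* slow) {
      if (test_statically_false) {
        return nullptr;                    // slow branch never taken: build nothing
      }
      return slow;                         // otherwise return the slow-path control
    }

    int main() {
      Ctl slow{"slow_path"};
      if (Ctl* c = generate_guard_sketch(false, &slow)) {
        std::printf("append %s to the slow region\n", c->name);
      }                                    // on nullptr: fast path only
      return 0;
    }
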
@ -199,11 +199,11 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode
RegionNode** exit_block, Node** result_memory, Node* length,
Node* src_start, Node* dst_start, BasicType type) {
const TypePtr *src_adr_type = _igvn.type(src_start)->isa_ptr();
Node* inline_block = NULL;
Node* stub_block = NULL;
Node* inline_block = nullptr;
Node* stub_block = nullptr;
int const_len = -1;
const TypeInt* lty = NULL;
const TypeInt* lty = nullptr;
uint shift = exact_log2(type2aelembytes(type));
if (length->Opcode() == Op_ConvI2L) {
lty = _igvn.type(length->in(1))->isa_int();
@ -234,7 +234,7 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode
transform_later(cmp_le);
Node* bol_le = new BoolNode(cmp_le, BoolTest::le);
transform_later(bol_le);
inline_block = generate_guard(ctrl, bol_le, NULL, PROB_FAIR);
inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
stub_block = *ctrl;
Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
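
The shift above is just log2 of the element size, so shifting the element count converts it to a byte count. A small self-contained check of that arithmetic, with exact_log2_sketch as a hypothetical stand-in for HotSpot's exact_log2:

    #include <cassert>

    // x must be a power of two, as element sizes are.
    static int exact_log2_sketch(int x) {
      int n = 0;
      while ((1 << n) < x) n++;
      assert((1 << n) == x);
      return n;
    }

    int main() {
      // e.g. a T_SHORT copy: 2-byte elements give shift == 1, so the byte
      // count is the element count scaled by << shift.
      int elem_bytes = 2;
      int shift = exact_log2_sketch(elem_bytes);
      int length = 10;                     // elements
      assert(shift == 1 && (length << shift) == 20);
      return 0;
    }
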
@ -269,16 +269,16 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode
Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
if ((*ctrl)->is_top()) return NULL;
if ((*ctrl)->is_top()) return nullptr;
if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
return NULL; // index is already adequately typed
return nullptr; // index is already adequately typed
Node* cmp_le = new CmpINode(index, intcon(0));
transform_later(cmp_le);
BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
Node* bol_le = new BoolNode(cmp_le, le_or_eq);
transform_later(bol_le);
Node* is_notp = generate_guard(ctrl, bol_le, NULL, PROB_MIN);
Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN);
return is_notp;
}
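
The never_negative flag is what lets BoolTest::le relax to BoolTest::eq: if the index provably cannot be negative, "nonpositive" means exactly zero. A tiny standalone check of that equivalence:

    #include <cassert>

    bool nonpositive_sketch(int index, bool never_negative) {
      // never_negative picks eq instead of le, mirroring the code above
      return never_negative ? (index == 0) : (index <= 0);
    }

    int main() {
      assert(nonpositive_sketch(0, true)  && nonpositive_sketch(0, false));
      assert(!nonpositive_sketch(5, true) && !nonpositive_sketch(5, false));
      assert(nonpositive_sketch(-3, false)); // only reachable when sign is unknown
      return 0;
    }
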
@ -318,8 +318,8 @@ address PhaseMacroExpand::basictype2arraycopy(BasicType t,
// or they are identical (which we can treat as disjoint.) We can also
// treat a copy with a destination index less that the source index
// as disjoint since a low->high copy will work correctly in this case.
if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
if (src_offset_inttype != nullptr && src_offset_inttype->is_con() &&
dest_offset_inttype != nullptr && dest_offset_inttype->is_con()) {
// both indices are constants
int s_offs = src_offset_inttype->get_con();
int d_offs = dest_offset_inttype->get_con();
@ -327,7 +327,7 @@ address PhaseMacroExpand::basictype2arraycopy(BasicType t,
aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
if (s_offs >= d_offs) disjoint = true;
} else if (src_offset == dest_offset && src_offset != NULL) {
} else if (src_offset == dest_offset && src_offset != nullptr) {
// This can occur if the offsets are identical non-constants.
disjoint = true;
}
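
A self-contained rehearsal of the constant-offset case above, with HeapWordSize and the array base offset replaced by assumed example values:

    #include <cassert>

    int main() {
      const int heap_word_size = 8;       // assumption: 64-bit heap words
      const int base_off = 16;            // assumed array base offset in bytes
      const int elem_size = 4;            // e.g. a 4-byte element type

      int s_offs = 8, d_offs = 2;         // both offsets are compile-time constants
      bool aligned  = ((base_off + s_offs * elem_size) % heap_word_size == 0) &&
                      ((base_off + d_offs * elem_size) % heap_word_size == 0);
      bool disjoint = (s_offs >= d_offs); // low->high copy is safe when dest <= src
      assert(aligned && disjoint);
      return 0;
    }
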
@ -380,7 +380,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
bool disjoint_bases,
bool length_never_negative,
RegionNode* slow_region) {
if (slow_region == NULL) {
if (slow_region == nullptr) {
slow_region = new RegionNode(1);
transform_later(slow_region);
}
@ -398,7 +398,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
&& !(UseTLAB && ZeroTLAB) // pointless if already zeroed
&& basic_elem_type != T_CONFLICT // avoid corner case
&& !src->eqv_uncast(dest)
&& alloc != NULL
&& alloc != nullptr
&& _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) {
assert(ac->is_alloc_tightly_coupled(), "sanity");
// acopy to uninitialized tightly coupled allocations
@ -423,7 +423,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
}
} else {
// No zeroing elimination needed here.
alloc = NULL;
alloc = nullptr;
acopy_to_uninitialized = false;
//original_dest = dest;
//dest_needs_zeroing = false;
@ -455,9 +455,9 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// Checked control path:
Node* checked_control = top();
Node* checked_mem = NULL;
Node* checked_i_o = NULL;
Node* checked_value = NULL;
Node* checked_mem = nullptr;
Node* checked_i_o = nullptr;
Node* checked_value = nullptr;
if (basic_elem_type == T_CONFLICT) {
assert(!dest_needs_zeroing, "");
@ -465,7 +465,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
adr_type,
src, src_offset, dest, dest_offset,
copy_length, acopy_to_uninitialized);
if (cv == NULL) cv = intcon(-1); // failure (no stub available)
if (cv == nullptr) cv = intcon(-1); // failure (no stub available)
checked_control = *ctrl;
checked_i_o = *io;
checked_mem = mem->memory_at(alias_idx);
@ -474,7 +474,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
}
Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
if (not_pos != NULL) {
if (not_pos != nullptr) {
Node* local_ctrl = not_pos, *local_io = *io;
MergeMemNode* local_mem = MergeMemNode::make(mem);
transform_later(local_mem);
@ -495,7 +495,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// Clear the whole thing since there are no source elements to copy.
generate_clear_array(local_ctrl, local_mem,
adr_type, dest, basic_elem_type,
intcon(0), NULL,
intcon(0), nullptr,
alloc->in(AllocateNode::AllocSize));
// Use a secondary InitializeNode as raw memory barrier.
// Currently it is needed only on this path since other
@ -533,7 +533,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
generate_clear_array(*ctrl, mem,
adr_type, dest, basic_elem_type,
intcon(0), dest_offset,
NULL);
nullptr);
}
// Next, perform a dynamic check on the tail length.
@ -541,16 +541,16 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// There are two wins: Avoid generating the ClearArray
// with its attendant messy index arithmetic, and upgrade
// the copy to a more hardware-friendly word size of 64 bits.
Node* tail_ctl = NULL;
Node* tail_ctl = nullptr;
if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
Node* cmp_lt = transform_later( new CmpINode(dest_tail, dest_length) );
Node* bol_lt = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
tail_ctl = generate_slow_guard(ctrl, bol_lt, NULL);
assert(tail_ctl != NULL || !(*ctrl)->is_top(), "must be an outcome");
tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr);
assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome");
}
// At this point, let's assume there is no tail.
if (!(*ctrl)->is_top() && alloc != NULL && basic_elem_type != T_OBJECT) {
if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) {
// There is no tail. Try an upgrade to a 64-bit copy.
bool didit = false;
{
@ -575,13 +575,13 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
}
// Clear the tail, if any.
if (tail_ctl != NULL) {
Node* notail_ctl = (*ctrl)->is_top() ? NULL : *ctrl;
if (tail_ctl != nullptr) {
Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl;
*ctrl = tail_ctl;
if (notail_ctl == NULL) {
if (notail_ctl == nullptr) {
generate_clear_array(*ctrl, mem,
adr_type, dest, basic_elem_type,
dest_tail, NULL,
dest_tail, nullptr,
dest_size);
} else {
// Make a local merge.
@ -591,7 +591,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
done_mem->init_req(1, mem->memory_at(alias_idx));
generate_clear_array(*ctrl, mem,
adr_type, dest, basic_elem_type,
dest_tail, NULL,
dest_tail, nullptr,
dest_size);
done_ctl->init_req(2, *ctrl);
done_mem->init_req(2, mem->memory_at(alias_idx));
@ -620,7 +620,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
Node* src_klass = ac->in(ArrayCopyNode::SrcKlass);
Node* dest_klass = ac->in(ArrayCopyNode::DestKlass);
assert(src_klass != NULL && dest_klass != NULL, "should have klasses");
assert(src_klass != nullptr && dest_klass != nullptr, "should have klasses");
// Generate the subtype check.
// This might fold up statically, or then again it might not.
@ -643,14 +643,14 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// (At this point we can assume disjoint_bases, since types differ.)
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
Node* p1 = basic_plus_adr(dest_klass, ek_offset);
Node* n1 = LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
Node* n1 = LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
Node* dest_elem_klass = transform_later(n1);
Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
adr_type,
dest_elem_klass,
src, src_offset, dest, dest_offset,
ConvI2X(copy_length), acopy_to_uninitialized);
if (cv == NULL) cv = intcon(-1); // failure (no stub available)
if (cv == nullptr) cv = intcon(-1); // failure (no stub available)
checked_control = local_ctrl;
checked_i_o = *io;
checked_mem = local_mem->memory_at(alias_idx);
@ -660,7 +660,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// At this point we know we do not need type checks on oop stores.
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
if (!bs->array_copy_requires_gc_barriers(alloc != NULL, copy_type, false, false, BarrierSetC2::Expansion)) {
if (!bs->array_copy_requires_gc_barriers(alloc != nullptr, copy_type, false, false, BarrierSetC2::Expansion)) {
// If we do not need gc barriers, copy using the jint or jlong stub.
copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
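
LP64_ONLY and NOT_LP64 are HotSpot's compile-time platform selectors: the first expands its argument only on 64-bit builds, the second only on 32-bit builds, so the two invocations above splice together into a single expression. A simplified re-definition for illustration (the _SKETCH macros and enum are local to this example, not the real HotSpot definitions):

    #include <cassert>

    #ifdef _LP64
    #define LP64_ONLY_SKETCH(code) code
    #define NOT_LP64_SKETCH(code)
    #else
    #define LP64_ONLY_SKETCH(code)
    #define NOT_LP64_SKETCH(code) code
    #endif

    enum BasicTypeSketch { T_INT_S, T_LONG_S };

    int main() {
      bool use_compressed_oops = true;    // assumption for the example
      BasicTypeSketch copy_type =
          LP64_ONLY_SKETCH(use_compressed_oops ? T_INT_S : T_LONG_S)
          NOT_LP64_SKETCH(T_INT_S);
      (void)use_compressed_oops;          // silence unused warning on 32-bit
      // With compressed oops on 64-bit, copies move 32-bit words.
      assert(copy_type == T_INT_S || copy_type == T_LONG_S);
      return 0;
    }
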
@ -686,7 +686,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
}
// Here are all the slow paths up to this point, in one bundle:
assert(slow_region != NULL, "allocated on entry");
assert(slow_region != nullptr, "allocated on entry");
slow_control = slow_region;
DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
@ -729,7 +729,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
slow_i_o = slow_i_o2;
slow_mem = slow_mem2;
if (alloc != NULL) {
if (alloc != nullptr) {
// We'll restart from the very beginning, after zeroing the whole thing.
// This can cause double writes, but that's OK since dest is brand new.
// So we ignore the low 31 bits of the value returned from the stub.
@ -769,7 +769,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
if (dest_needs_zeroing) {
generate_clear_array(local_ctrl, local_mem,
adr_type, dest, basic_elem_type,
intcon(0), NULL,
intcon(0), nullptr,
alloc->in(AllocateNode::AllocSize));
}
@ -789,7 +789,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// Remove unused edges.
for (uint i = 1; i < result_region->req(); i++) {
if (result_region->in(i) == NULL) {
if (result_region->in(i) == nullptr) {
result_region->init_req(i, top());
}
}
@ -801,7 +801,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// mem no longer guaranteed to stay a MergeMemNode
Node* out_mem = mem;
DEBUG_ONLY(mem = NULL);
DEBUG_ONLY(mem = nullptr);
// The memory edges above are precise in order to model effects around
// array copies accurately to allow value numbering of field loads around
@ -815,7 +815,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// the membar also.
//
// Do not let reads from the cloned object float above the arraycopy.
if (alloc != NULL && !alloc->initialization()->does_not_escape()) {
if (alloc != nullptr && !alloc->initialization()->does_not_escape()) {
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
// other threads.
@ -831,7 +831,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
}
_igvn.replace_node(_callprojs.fallthrough_memproj, out_mem);
if (_callprojs.fallthrough_ioproj != NULL) {
if (_callprojs.fallthrough_ioproj != nullptr) {
_igvn.replace_node(_callprojs.fallthrough_ioproj, *io);
}
_igvn.replace_node(_callprojs.fallthrough_catchproj, *ctrl);
@ -839,9 +839,9 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
#ifdef ASSERT
const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
if (dest_t->is_known_instance() && !is_partial_array_copy) {
ArrayCopyNode* ac = NULL;
ArrayCopyNode* ac = nullptr;
assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
assert(ac == NULL, "no arraycopy anymore");
assert(ac == nullptr, "no arraycopy anymore");
}
#endif
@ -865,12 +865,12 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// dest oop of the destination array
// basic_elem_type element type of the destination
// slice_idx array index of first element to store
// slice_len number of elements to store (or NULL)
// slice_len number of elements to store (or null)
// dest_size total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-NULL.
// If dest_size is non-NULL, zeroing extends to the end of the object.
// If slice_len is non-NULL, the slice_idx value must be a constant.
// Exactly one of slice_len or dest_size must be non-null.
// If dest_size is non-null, zeroing extends to the end of the object.
// If slice_len is non-null, the slice_idx value must be a constant.
void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
const TypePtr* adr_type,
Node* dest,
@ -879,9 +879,9 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
Node* slice_len,
Node* dest_size) {
// one or the other but not both of slice_len and dest_size:
assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
if (slice_len == NULL) slice_len = top();
if (dest_size == NULL) dest_size = top();
assert((slice_len != nullptr? 1: 0) + (dest_size != nullptr? 1: 0) == 1, "");
if (slice_len == nullptr) slice_len = top();
if (dest_size == nullptr) dest_size = top();
uint alias_idx = C->get_alias_index(adr_type);
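
A hedged sketch of that one-or-the-other contract, with plain pointers standing in for the node arguments:

    #include <cassert>

    void clear_array_sketch(const int* slice_len, const int* dest_size) {
      // exactly one of slice_len and dest_size may be non-null
      assert(((slice_len != nullptr) ? 1 : 0) +
             ((dest_size != nullptr) ? 1 : 0) == 1);
      if (dest_size != nullptr) {
        // zeroing extends from slice_idx to the end of the object
      } else {
        // zero exactly *slice_len elements; slice_idx must be a constant
      }
    }

    int main() {
      int len = 8, size = 64;
      clear_array_sketch(&len, nullptr);  // slice form
      clear_array_sketch(nullptr, &size); // clear-to-end form
      return 0;
    }
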
@ -1041,10 +1041,10 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
countx = transform_later(new SubXNode(countx, MakeConX(dest_off)));
countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong)));
bool disjoint_bases = true; // since alloc != NULL
bool disjoint_bases = true; // since alloc isn't null
generate_unchecked_arraycopy(ctrl, mem,
adr_type, T_LONG, disjoint_bases,
sptr, NULL, dptr, NULL, countx, dest_uninitialized);
sptr, nullptr, dptr, nullptr, countx, dest_uninitialized);
return true;
}
@ -1098,12 +1098,12 @@ MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
transform_later(out_mem);
// When src is negative and arraycopy is before an infinite loop, _callprojs.fallthrough_ioproj
// could be NULL. Skip clone and update NULL fallthrough_ioproj.
if (_callprojs.fallthrough_ioproj != NULL) {
// could be null. Skip clone and update null fallthrough_ioproj.
if (_callprojs.fallthrough_ioproj != nullptr) {
*io = _callprojs.fallthrough_ioproj->clone();
transform_later(*io);
} else {
*io = NULL;
*io = nullptr;
}
return out_mem;
@ -1116,11 +1116,11 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode**
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized) {
if ((*ctrl)->is_top()) return NULL;
if ((*ctrl)->is_top()) return nullptr;
address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
return NULL;
if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
return nullptr;
}
// Pick out the parameters required to perform a store-check
@ -1129,7 +1129,7 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode**
// super_check_offset, for the desired klass.
int sco_offset = in_bytes(Klass::super_check_offset_offset());
Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
Node* n3 = new LoadINode(NULL, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
Node* check_offset = ConvI2X(transform_later(n3));
Node* check_value = dest_elem_klass;
@ -1154,12 +1154,12 @@ Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** m
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized) {
if ((*ctrl)->is_top()) return NULL;
if ((*ctrl)->is_top()) return nullptr;
assert(!dest_uninitialized, "Invariant");
address copyfunc_addr = StubRoutines::generic_arraycopy();
if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
return NULL;
if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
return nullptr;
}
const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
@ -1186,7 +1186,7 @@ bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode**
Node* src_start = src;
Node* dest_start = dest;
if (src_offset != NULL || dest_offset != NULL) {
if (src_offset != nullptr || dest_offset != nullptr) {
src_start = array_element_address(src, src_offset, basic_elem_type);
dest_start = array_element_address(dest, dest_offset, basic_elem_type);
}
@ -1197,8 +1197,8 @@ bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode**
basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
disjoint_bases, copyfunc_name, dest_uninitialized);
Node* result_memory = NULL;
RegionNode* exit_block = NULL;
Node* result_memory = nullptr;
RegionNode* exit_block = nullptr;
if (ArrayOperationPartialInlineSize > 0 && is_subword_type(basic_elem_type) &&
Matcher::vector_width_in_bytes(basic_elem_type) >= 16) {
generate_partial_inlining_block(ctrl, mem, adr_type, &exit_block, &result_memory,
@ -1242,7 +1242,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
Node* dest = ac->in(ArrayCopyNode::Dest);
Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
Node* length = ac->in(ArrayCopyNode::Length);
MergeMemNode* merge_mem = NULL;
MergeMemNode* merge_mem = nullptr;
if (ac->is_clonebasic()) {
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
@ -1253,10 +1253,10 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
merge_mem = MergeMemNode::make(mem);
transform_later(merge_mem);
AllocateArrayNode* alloc = NULL;
AllocateArrayNode* alloc = nullptr;
if (ac->is_alloc_tightly_coupled()) {
alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
assert(alloc != NULL, "expect alloc");
assert(alloc != nullptr, "expect alloc");
}
const TypePtr* adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot);
@ -1271,10 +1271,10 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
return;
}
AllocateArrayNode* alloc = NULL;
AllocateArrayNode* alloc = nullptr;
if (ac->is_alloc_tightly_coupled()) {
alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
assert(alloc != NULL, "expect alloc");
assert(alloc != nullptr, "expect alloc");
}
assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");
@ -1292,10 +1292,10 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
BasicType src_elem = T_CONFLICT;
BasicType dest_elem = T_CONFLICT;
if (top_src != NULL && top_src->elem() != Type::BOTTOM) {
if (top_src != nullptr && top_src->elem() != Type::BOTTOM) {
src_elem = top_src->elem()->array_element_basic_type();
}
if (top_dest != NULL && top_dest->elem() != Type::BOTTOM) {
if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
dest_elem = top_dest->elem()->array_element_basic_type();
}
if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
@ -1319,7 +1319,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
}
// Call StubRoutines::generic_arraycopy stub.
Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io,
Node* mem = generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io,
TypeRawPtr::BOTTOM, T_CONFLICT,
src, src_offset, dest, dest_offset, length,
// If a negative length guard was generated for the ArrayCopyNode,
@ -1341,7 +1341,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
}
_igvn.replace_node(_callprojs.fallthrough_memproj, merge_mem);
if (_callprojs.fallthrough_ioproj != NULL) {
if (_callprojs.fallthrough_ioproj != nullptr) {
_igvn.replace_node(_callprojs.fallthrough_ioproj, io);
}
_igvn.replace_node(_callprojs.fallthrough_catchproj, ctrl);
@ -1390,7 +1390,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
// (7) src_offset + length must not exceed length of src.
Node* alen = ac->in(ArrayCopyNode::SrcLen);
assert(alen != NULL, "need src len");
assert(alen != nullptr, "need src len");
generate_limit_guard(&ctrl,
src_offset, length,
alen,
@ -1398,7 +1398,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
// (8) dest_offset + length must not exceed length of dest.
alen = ac->in(ArrayCopyNode::DestLen);
assert(alen != NULL, "need dest len");
assert(alen != nullptr, "need dest len");
generate_limit_guard(&ctrl,
dest_offset, length,
alen,
@ -1408,7 +1408,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
// The generate_arraycopy subroutine checks this.
}
// This is where the memory effects are placed:
const TypePtr* adr_type = NULL;
const TypePtr* adr_type = nullptr;
if (ac->_dest_type != TypeOopPtr::BOTTOM) {
adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
} else {

View File

@ -83,52 +83,52 @@ Matcher::Matcher()
_register_save_type(register_save_type) {
C->set_matcher(this);
idealreg2spillmask [Op_RegI] = NULL;
idealreg2spillmask [Op_RegN] = NULL;
idealreg2spillmask [Op_RegL] = NULL;
idealreg2spillmask [Op_RegF] = NULL;
idealreg2spillmask [Op_RegD] = NULL;
idealreg2spillmask [Op_RegP] = NULL;
idealreg2spillmask [Op_VecA] = NULL;
idealreg2spillmask [Op_VecS] = NULL;
idealreg2spillmask [Op_VecD] = NULL;
idealreg2spillmask [Op_VecX] = NULL;
idealreg2spillmask [Op_VecY] = NULL;
idealreg2spillmask [Op_VecZ] = NULL;
idealreg2spillmask [Op_RegFlags] = NULL;
idealreg2spillmask [Op_RegVectMask] = NULL;
idealreg2spillmask [Op_RegI] = nullptr;
idealreg2spillmask [Op_RegN] = nullptr;
idealreg2spillmask [Op_RegL] = nullptr;
idealreg2spillmask [Op_RegF] = nullptr;
idealreg2spillmask [Op_RegD] = nullptr;
idealreg2spillmask [Op_RegP] = nullptr;
idealreg2spillmask [Op_VecA] = nullptr;
idealreg2spillmask [Op_VecS] = nullptr;
idealreg2spillmask [Op_VecD] = nullptr;
idealreg2spillmask [Op_VecX] = nullptr;
idealreg2spillmask [Op_VecY] = nullptr;
idealreg2spillmask [Op_VecZ] = nullptr;
idealreg2spillmask [Op_RegFlags] = nullptr;
idealreg2spillmask [Op_RegVectMask] = nullptr;
idealreg2debugmask [Op_RegI] = NULL;
idealreg2debugmask [Op_RegN] = NULL;
idealreg2debugmask [Op_RegL] = NULL;
idealreg2debugmask [Op_RegF] = NULL;
idealreg2debugmask [Op_RegD] = NULL;
idealreg2debugmask [Op_RegP] = NULL;
idealreg2debugmask [Op_VecA] = NULL;
idealreg2debugmask [Op_VecS] = NULL;
idealreg2debugmask [Op_VecD] = NULL;
idealreg2debugmask [Op_VecX] = NULL;
idealreg2debugmask [Op_VecY] = NULL;
idealreg2debugmask [Op_VecZ] = NULL;
idealreg2debugmask [Op_RegFlags] = NULL;
idealreg2debugmask [Op_RegVectMask] = NULL;
idealreg2debugmask [Op_RegI] = nullptr;
idealreg2debugmask [Op_RegN] = nullptr;
idealreg2debugmask [Op_RegL] = nullptr;
idealreg2debugmask [Op_RegF] = nullptr;
idealreg2debugmask [Op_RegD] = nullptr;
idealreg2debugmask [Op_RegP] = nullptr;
idealreg2debugmask [Op_VecA] = nullptr;
idealreg2debugmask [Op_VecS] = nullptr;
idealreg2debugmask [Op_VecD] = nullptr;
idealreg2debugmask [Op_VecX] = nullptr;
idealreg2debugmask [Op_VecY] = nullptr;
idealreg2debugmask [Op_VecZ] = nullptr;
idealreg2debugmask [Op_RegFlags] = nullptr;
idealreg2debugmask [Op_RegVectMask] = nullptr;
idealreg2mhdebugmask[Op_RegI] = NULL;
idealreg2mhdebugmask[Op_RegN] = NULL;
idealreg2mhdebugmask[Op_RegL] = NULL;
idealreg2mhdebugmask[Op_RegF] = NULL;
idealreg2mhdebugmask[Op_RegD] = NULL;
idealreg2mhdebugmask[Op_RegP] = NULL;
idealreg2mhdebugmask[Op_VecA] = NULL;
idealreg2mhdebugmask[Op_VecS] = NULL;
idealreg2mhdebugmask[Op_VecD] = NULL;
idealreg2mhdebugmask[Op_VecX] = NULL;
idealreg2mhdebugmask[Op_VecY] = NULL;
idealreg2mhdebugmask[Op_VecZ] = NULL;
idealreg2mhdebugmask[Op_RegFlags] = NULL;
idealreg2mhdebugmask[Op_RegVectMask] = NULL;
idealreg2mhdebugmask[Op_RegI] = nullptr;
idealreg2mhdebugmask[Op_RegN] = nullptr;
idealreg2mhdebugmask[Op_RegL] = nullptr;
idealreg2mhdebugmask[Op_RegF] = nullptr;
idealreg2mhdebugmask[Op_RegD] = nullptr;
idealreg2mhdebugmask[Op_RegP] = nullptr;
idealreg2mhdebugmask[Op_VecA] = nullptr;
idealreg2mhdebugmask[Op_VecS] = nullptr;
idealreg2mhdebugmask[Op_VecD] = nullptr;
idealreg2mhdebugmask[Op_VecX] = nullptr;
idealreg2mhdebugmask[Op_VecY] = nullptr;
idealreg2mhdebugmask[Op_VecZ] = nullptr;
idealreg2mhdebugmask[Op_RegFlags] = nullptr;
idealreg2mhdebugmask[Op_RegVectMask] = nullptr;
debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
debug_only(_mem_node = nullptr;) // Ideal memory node consumed by mach node
}
//------------------------------warp_incoming_stk_arg------------------------
@ -172,7 +172,7 @@ void Matcher::verify_new_nodes_only(Node* xroot) {
assert(C->node_arena()->contains(n), "dead node");
for (uint j = 0; j < n->req(); j++) {
Node* in = n->in(j);
if (in != NULL) {
if (in != nullptr) {
assert(C->node_arena()->contains(in), "dead node");
if (!visited.test(in->_idx)) {
worklist.push(in);
@ -325,7 +325,7 @@ void Matcher::match( ) {
C->print_method(PHASE_BEFORE_MATCHING, 1);
// Create new ideal node ConP #NULL even if it does exist in old space
// Create new ideal node ConP #null even if it does exist in old space
// to avoid false sharing if the corresponding mach node is not used.
// The corresponding mach node is only used in rare cases for derived
// pointers.
@ -336,10 +336,10 @@ void Matcher::match( ) {
// Save debug and profile information for nodes in old space:
_old_node_note_array = C->node_note_array();
if (_old_node_note_array != NULL) {
if (_old_node_note_array != nullptr) {
C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
(C->comp_arena(), _old_node_note_array->length(),
0, NULL));
0, nullptr));
}
// Pre-size the new_node table to avoid the need for range checks.
@ -356,7 +356,7 @@ void Matcher::match( ) {
C->set_cached_top_node(xform( C->top(), live_nodes ));
if (!C->failing()) {
Node* xroot = xform( C->root(), 1 );
if (xroot == NULL) {
if (xroot == nullptr) {
Matcher::soft_match_failure(); // recursive matching process failed
C->record_method_not_compilable("instruction match failed");
} else {
@ -373,22 +373,22 @@ void Matcher::match( ) {
}
}
// Generate new mach node for ConP #NULL
assert(new_ideal_null != NULL, "sanity");
// Generate new mach node for ConP #null
assert(new_ideal_null != nullptr, "sanity");
_mach_null = match_tree(new_ideal_null);
// Don't set control; it will confuse GCM since there are no uses.
// The control will be set when this node is used for the first time
// in find_base_for_derived().
assert(_mach_null != NULL, "");
assert(_mach_null != nullptr, "");
C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);
C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr);
#ifdef ASSERT
verify_new_nodes_only(xroot);
#endif
}
}
if (C->top() == NULL || C->root() == NULL) {
if (C->top() == nullptr || C->root() == nullptr) {
C->record_method_not_compilable("graph lost"); // %%% cannot happen?
}
if (C->failing()) {
@ -1025,7 +1025,7 @@ static void match_alias_type(Compile* C, Node* n, Node* m) {
for (uint i = 1; i < n->req(); i++) {
Node* n1 = n->in(i);
const TypePtr* n1at = n1->adr_type();
if (n1at != NULL) {
if (n1at != nullptr) {
nat = n1at;
nidx = C->get_alias_index(n1at);
}
@ -1076,7 +1076,7 @@ static void match_alias_type(Compile* C, Node* n, Node* m) {
case Op_OnSpinWait:
case Op_EncodeISOArray:
nidx = Compile::AliasIdxTop;
nat = NULL;
nat = nullptr;
break;
}
}
@ -1099,10 +1099,10 @@ Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
// Use one stack to keep both: child's node/state and parent's node/index
MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root
mstack.push(n, Visit, nullptr, -1); // set null as parent to indicate root
while (mstack.is_nonempty()) {
C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
if (C->failing()) return NULL;
if (C->failing()) return nullptr;
n = mstack.node(); // Leave node on stack
Node_State nstate = mstack.state();
if (nstate == Visit) {
@ -1119,17 +1119,17 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// Calls match special. They match alone with no children.
// Their children, the incoming arguments, match normally.
m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
if (C->failing()) return NULL;
if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
if (C->failing()) return nullptr;
if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; }
if (n->is_MemBar()) {
m->as_MachMemBar()->set_adr_type(n->adr_type());
}
} else { // Nothing the matcher cares about
if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) { // Projections?
if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) { // Projections?
// Convert to machine-dependent projection
m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
NOT_PRODUCT(record_new2old(m, n);)
if (m->in(0) != NULL) // m might be top
if (m->in(0) != nullptr) // m might be top
collect_null_checks(m, n);
} else { // Else just a regular ol' guy
m = n->clone(); // So just clone into new-space
@ -1141,7 +1141,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
}
set_new_node(n, m); // Map old to new
if (_old_node_note_array != NULL) {
if (_old_node_note_array != nullptr) {
Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
n->_idx);
C->set_node_notes_at(m->_idx, nn);
@ -1159,7 +1159,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// Put precedence edges on stack first (match them last).
for (i = oldn->req(); (uint)i < oldn->len(); i++) {
Node *m = oldn->in(i);
if (m == NULL) break;
if (m == nullptr) break;
// set -1 to call add_prec() instead of set_req() during Step1
mstack.push(m, Visit, n, -1);
}
@ -1167,7 +1167,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// Handle precedence edges for interior nodes
for (i = n->len()-1; (uint)i >= n->req(); i--) {
Node *m = n->in(i);
if (m == NULL || C->node_arena()->contains(m)) continue;
if (m == nullptr || C->node_arena()->contains(m)) continue;
n->rm_prec(i);
// set -1 to call add_prec() instead of set_req() during Step1
mstack.push(m, Visit, n, -1);
@ -1202,7 +1202,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// And now walk his children, and convert his inputs to new-space.
for( ; i >= 0; --i ) { // For all normal inputs do
Node *m = n->in(i); // Get input
if(m != NULL)
if(m != nullptr)
mstack.push(m, Visit, n, i);
}
@ -1210,7 +1210,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
else if (nstate == Post_Visit) {
// Set xformed input
Node *p = mstack.parent();
if (p != NULL) { // root doesn't have parent
if (p != nullptr) { // root doesn't have parent
int i = (int)mstack.index();
if (i >= 0)
p->set_req(i, n); // required input
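
The mstack loop above is an iterative depth-first rewrite: a node is pushed in the Visit state, flipped in place to Post_Visit once its children are queued, and wired into its parent when it is popped the second time. A standalone sketch of the same two-state pattern (Item and the integer graph are hypothetical, not the matcher's types):

    #include <cstdio>
    #include <vector>

    enum State { Visit, Post_Visit };
    struct Item { int node; State state; int parent; int index; };

    int main() {
      std::vector<std::vector<int>> in = {{1, 2}, {}, {}};  // node 0 -> {1, 2}
      std::vector<Item> mstack;
      mstack.push_back({0, Visit, -1, -1});     // -1 parent marks the root
      while (!mstack.empty()) {
        size_t t = mstack.size() - 1;
        if (mstack[t].state == Visit) {
          mstack[t].state = Post_Visit;         // leave node on stack
          int n = mstack[t].node;
          for (int i = (int)in[n].size() - 1; i >= 0; --i) {
            mstack.push_back({in[n][i], Visit, n, i});
          }
        } else {                                // children fully transformed
          Item done = mstack[t];
          mstack.pop_back();
          if (done.parent >= 0) {
            std::printf("set input %d of node %d\n", done.index, done.parent);
          }
        }
      }
      return 0;
    }
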
@ -1256,13 +1256,13 @@ OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out
// They match alone with no children. Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
MachSafePointNode *msfpt = NULL;
MachCallNode *mcall = NULL;
MachSafePointNode *msfpt = nullptr;
MachCallNode *mcall = nullptr;
uint cnt;
// Split out case for SafePoint vs Call
CallNode *call;
const TypeTuple *domain;
ciMethod* method = NULL;
ciMethod* method = nullptr;
bool is_method_handle_invoke = false; // for special kill effects
if( sfpt->is_Call() ) {
call = sfpt->as_Call();
@ -1271,8 +1271,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
// Match just the call, nothing else
MachNode *m = match_tree(call);
if (C->failing()) return NULL;
if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
if (C->failing()) return nullptr;
if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; }
// Copy data from the Ideal SafePoint to the machine version
mcall = m->as_MachCall();
@ -1312,10 +1312,10 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
}
// This is a non-call safepoint
else {
call = NULL;
domain = NULL;
call = nullptr;
domain = nullptr;
MachNode *mn = match_tree(sfpt);
if (C->failing()) return NULL;
if (C->failing()) return nullptr;
msfpt = mn->as_MachSafePoint();
cnt = TypeFunc::Parms;
}
@ -1340,7 +1340,7 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
// Calls to C may hammer extra stack slots above and beyond any arguments.
// These are usually backing store for register arguments for varargs.
if( call != NULL && call->is_CallRuntime() )
if( call != nullptr && call->is_CallRuntime() )
out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
@ -1451,7 +1451,7 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
}
// Debug inputs begin just after the last incoming parameter
assert((mcall == NULL) || (mcall->jvms() == NULL) ||
assert((mcall == nullptr) || (mcall->jvms() == nullptr) ||
(mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
// Add additional edges.
@ -1490,18 +1490,18 @@ MachNode *Matcher::match_tree( const Node *n ) {
Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
Node* save_mem_node = _mem_node;
_mem_node = n->is_Store() ? (Node*)n : NULL;
_mem_node = n->is_Store() ? (Node*)n : nullptr;
#endif
// State object for root node of match tree
// Allocate it on _states_arena - stack allocation can cause stack overflow.
State *s = new (&_states_arena) State;
s->_kids[0] = NULL;
s->_kids[1] = NULL;
s->_kids[0] = nullptr;
s->_kids[1] = nullptr;
s->_leaf = (Node*)n;
// Label the input tree, allocating labels from top-level arena
Node* root_mem = mem;
Label_Root(n, s, n->in(0), root_mem);
if (C->failing()) return NULL;
if (C->failing()) return nullptr;
// The minimum cost match for the whole tree is found at the root State
uint mincost = max_juint;
@ -1521,7 +1521,7 @@ MachNode *Matcher::match_tree( const Node *n ) {
s->dump();
#endif
Matcher::soft_match_failure();
return NULL;
return nullptr;
}
// Reduce input tree based upon the state labels to machine Nodes
MachNode *m = ReduceInst(s, s->rule(mincost), mem);
@ -1568,7 +1568,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
Node* m_control = m->in(0);
// Control of load's memory can post-dominate load's control.
// So use it since load can't float above its memory.
Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr;
if (control && m_control && control != m_control && control != mem_control) {
// Actually, we can live with the most conservative control we
@ -1622,7 +1622,7 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
LabelRootDepth++;
if (LabelRootDepth > MaxLabelRootDepth) {
C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
return NULL;
return nullptr;
}
uint care = 0; // Edges matcher cares about
uint cnt = n->req();
@ -1632,13 +1632,13 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
// Can only subsume a child into your match-tree if that child's memory state
// is not modified along the path to another input.
// It is unsafe even if the other inputs are separate roots.
Node *input_mem = NULL;
Node *input_mem = nullptr;
for( i = 1; i < cnt; i++ ) {
if( !n->match_edge(i) ) continue;
Node *m = n->in(i); // Get ith input
assert( m, "expect non-null children" );
if( m->is_Load() ) {
if( input_mem == NULL ) {
if( input_mem == nullptr ) {
input_mem = m->in(MemNode::Memory);
if (mem == (Node*)1) {
// Save this memory to bail out if there's another memory access
@ -1660,8 +1660,8 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
assert( care <= 2, "binary only for now" );
// Recursively label the State tree.
s->_kids[0] = NULL;
s->_kids[1] = NULL;
s->_kids[0] = nullptr;
s->_kids[1] = nullptr;
s->_leaf = m;
// Check for leaves of the State Tree; things that cannot be a part of
@ -1686,11 +1686,11 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
} else {
// If match tree has no control and we do, adopt it for entire tree
if( control == NULL && m->in(0) != NULL && m->req() > 1 )
if( control == nullptr && m->in(0) != nullptr && m->req() > 1 )
control = m->in(0); // Pick up control
// Else match as a normal part of the match tree.
control = Label_Root(m, s, control, mem);
if (C->failing()) return NULL;
if (C->failing()) return nullptr;
}
}
@ -1718,36 +1718,36 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem)
// program. The register allocator is free to split uses later to
// split live ranges.
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr;
// See if this Con has already been reduced using this rule.
if (_shared_nodes.Size() <= leaf->_idx) return NULL;
if (_shared_nodes.Size() <= leaf->_idx) return nullptr;
MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
if (last != NULL && rule == last->rule()) {
if (last != nullptr && rule == last->rule()) {
// Don't expect control change for DecodeN
if (leaf->is_DecodeNarrowPtr())
return last;
// Get the new space root.
Node* xroot = new_node(C->root());
if (xroot == NULL) {
if (xroot == nullptr) {
// This shouldn't happen given the order of matching.
return NULL;
return nullptr;
}
// Shared constants need to have their control be root so they
// can be scheduled properly.
Node* control = last->in(0);
if (control != xroot) {
if (control == NULL || control == C->root()) {
if (control == nullptr || control == C->root()) {
last->set_req(0, xroot);
} else {
assert(false, "unexpected control");
return NULL;
return nullptr;
}
}
return last;
}
return NULL;
return nullptr;
}
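
find_shared_node is per-node memoization: the first mach node reduced for a constant is cached under its node index and reused only when the requested rule matches. A standalone sketch of that lookup discipline (Mach and the map are hypothetical stand-ins):

    #include <cassert>
    #include <unordered_map>

    struct Mach { int rule; };

    static std::unordered_map<int, Mach*> shared_nodes;   // idx -> cached node

    Mach* find_shared_sketch(int idx, int rule) {
      auto it = shared_nodes.find(idx);
      if (it != shared_nodes.end() && it->second->rule == rule) {
        return it->second;    // same constant reduced by the same rule: share it
      }
      return nullptr;         // caller builds (and later registers) a fresh one
    }

    int main() {
      Mach m{42};
      shared_nodes[7] = &m;
      assert(find_shared_sketch(7, 42) == &m);        // hit
      assert(find_shared_sketch(7, 13) == nullptr);   // rule mismatch: no sharing
      return 0;
    }
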
@ -1773,15 +1773,15 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
assert( rule >= NUM_OPERANDS, "called with operand rule" );
MachNode* shared_node = find_shared_node(s->_leaf, rule);
if (shared_node != NULL) {
if (shared_node != nullptr) {
return shared_node;
}
// Build the object to represent this state & prepare for recursive calls
MachNode *mach = s->MachNodeGenerator(rule);
guarantee(mach != NULL, "Missing MachNode");
guarantee(mach != nullptr, "Missing MachNode");
mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
assert( mach->_opnds[0] != NULL, "Missing result operand" );
assert( mach->_opnds[0] != nullptr, "Missing result operand" );
Node *leaf = s->_leaf;
NOT_PRODUCT(record_new2old(mach, leaf);)
// Check for instruction or instruction chain rule
@ -1804,14 +1804,14 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
#ifdef ASSERT
// Verify adr type after matching memory operation
const MachOper* oper = mach->memory_operand();
if (oper != NULL && oper != (MachOper*)-1) {
if (oper != nullptr && oper != (MachOper*)-1) {
// It has a unique memory operand. Find corresponding ideal mem node.
Node* m = NULL;
Node* m = nullptr;
if (leaf->is_Mem()) {
m = leaf;
} else {
m = _mem_node;
assert(m != NULL && m->is_Mem(), "expecting memory node");
assert(m != nullptr && m->is_Mem(), "expecting memory node");
}
const Type* mach_at = mach->adr_type();
// DecodeN node consumed by an address may have different type
@ -1850,7 +1850,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
ex->in(1)->set_req(0, C->root());
// Remove old node from the graph
for( uint i=0; i<mach->req(); i++ ) {
mach->set_req(i,NULL);
mach->set_req(i,nullptr);
}
NOT_PRODUCT(record_new2old(ex, s->_leaf);)
}
@ -1883,7 +1883,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
for (uint i = n->req(); i < n->len(); i++) {
if (n->in(i) != NULL) {
if (n->in(i) != nullptr) {
mach->add_prec(n->in(i));
}
}
@ -1930,15 +1930,15 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
mem = mem2;
}
if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
if( mach->in(0) == NULL )
if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
if( mach->in(0) == nullptr )
mach->set_req(0, s->_leaf->in(0));
}
// Now recursively walk the state tree & add operand list.
for( uint i=0; i<2; i++ ) { // binary tree
State *newstate = s->_kids[i];
if( newstate == NULL ) break; // Might only have 1 child
if( newstate == nullptr ) break; // Might only have 1 child
// 'op' is what I am expecting to receive
int op;
if( i == 0 ) {
@ -1993,10 +1993,10 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
assert( rule < _LAST_MACH_OPER, "called with operand rule" );
State *kid = s->_kids[0];
assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" );
// Leaf? And not subsumed?
if( kid == NULL && !_swallowed[rule] ) {
if( kid == nullptr && !_swallowed[rule] ) {
mach->add_req( s->_leaf ); // Add leaf pointer
return; // Bail out
}
@ -2017,7 +2017,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
}
}
for (uint i = 0; kid != NULL && i < 2; kid = s->_kids[1], i++) { // binary tree
for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) { // binary tree
int newrule;
if( i == 0) {
newrule = kid->rule(_leftOp[rule]);
@ -2057,7 +2057,7 @@ OptoReg::Name Matcher::find_receiver() {
}
bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
if (n != NULL && m != NULL) {
if (n != nullptr && m != nullptr) {
return VectorNode::is_vector_shift(n) &&
VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
}
@ -2137,8 +2137,8 @@ void Matcher::find_shared(Node* n) {
}
for (int i = n->req() - 1; i >= 0; --i) { // For my children
Node* m = n->in(i); // Get ith input
if (m == NULL) {
continue; // Ignore NULLs
if (m == nullptr) {
continue; // Ignore nulls
}
if (clone_node(n, m, mstack)) {
continue;
@ -2561,7 +2561,7 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
// Look for DecodeN node which should be pinned to orig_proj.
// On platforms (Sparc) which can not handle 2 adds
// in addressing mode we have to keep a DecodeN node and
// use it to do implicit NULL check in address.
// use it to do implicit null check in address.
//
// DecodeN node was pinned to non-null path (orig_proj) during
// CastPP transformation in final_graph_reshaping_impl().
@ -2571,9 +2571,9 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
Node* d = orig_proj->raw_out(i);
if (d->is_DecodeN() && d->in(1) == val) {
val = d;
val->set_req(0, NULL); // Unpin now.
val->set_req(0, nullptr); // Unpin now.
// Mark this as special case to distinguish from
// a regular case: CmpP(DecodeN, NULL).
// a regular case: CmpP(DecodeN, null).
val = (Node*)(((intptr_t)val) | 1);
break;
}
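
The `| 1` above is classic low-bit pointer tagging: node pointers are aligned, so the least significant bit is free to carry a one-bit "special case" flag that is tested and stripped later. A standalone illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      alignas(8) static int node;          // aligned => low bits are zero
      int* val = &node;
      // mark the special case
      int* tagged = (int*)(((intptr_t)val) | 1);
      // test and strip the mark later
      bool is_special = (((intptr_t)tagged) & 1) != 0;
      int* untagged = (int*)(((intptr_t)tagged) & ~(intptr_t)1);
      assert(is_special && untagged == val);
      return 0;
    }
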
@ -2587,7 +2587,7 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
}
//---------------------------validate_null_checks------------------------------
// Its possible that the value being NULL checked is not the root of a match
// It's possible that the value being null checked is not the root of a match
// tree. If so, I cannot use the value in an implicit null check.
void Matcher::validate_null_checks( ) {
uint cnt = _null_check_tests.size();
@ -2599,12 +2599,12 @@ void Matcher::validate_null_checks( ) {
if (has_new_node(val)) {
Node* new_val = new_node(val);
if (is_decoden) {
assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity");
// Note: new_val may have a control edge if
// the original ideal node DecodeN was matched before
// it was unpinned in Matcher::collect_null_checks().
// Unpin the mach node and mark it.
new_val->set_req(0, NULL);
new_val->set_req(0, nullptr);
new_val = (Node*)(((intptr_t)new_val) | 1);
}
// Is a match-tree root, so replace with the matched value
@ -2630,15 +2630,15 @@ bool Matcher::gen_narrow_oop_implicit_null_checks() {
}
return CompressedOops::use_implicit_null_checks() &&
(narrow_oop_use_complex_address() ||
CompressedOops::base() != NULL);
CompressedOops::base() != nullptr);
}
// Compute RegMask for an ideal register.
const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
const Type* t = Type::mreg2type[ideal_reg];
if (t == NULL) {
if (t == nullptr) {
assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
return NULL; // not supported
return nullptr; // not supported
}
Node* fp = ret->in(TypeFunc::FramePtr);
Node* mem = ret->in(TypeFunc::Memory);
@ -2647,25 +2647,25 @@ const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
Node* spill;
switch (ideal_reg) {
case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(), mo); break;
case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(), mo); break;
case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t, mo); break;
case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t, mo); break;
case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(), mo); break;
case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break;
case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(), mo); break;
case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(), mo); break;
case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t, mo); break;
case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t, mo); break;
case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(), mo); break;
case Op_VecA: // fall-through
case Op_VecS: // fall-through
case Op_VecD: // fall-through
case Op_VecX: // fall-through
case Op_VecY: // fall-through
case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break;
case Op_RegVectMask: return Matcher::predicate_reg_mask();
default: ShouldNotReachHere();
}
MachNode* mspill = match_tree(spill);
assert(mspill != NULL, "matching failed: %d", ideal_reg);
assert(mspill != nullptr, "matching failed: %d", ideal_reg);
// Handle generic vector operand case
if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
specialize_mach_node(mspill);
@ -2701,7 +2701,7 @@ void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
// Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
Node* def = NULL;
Node* def = nullptr;
if (opnd_idx == 0) { // DEF
def = m; // use mach node itself to compute vector operand type
} else {
@ -2743,7 +2743,7 @@ void Matcher::specialize_generic_vector_operands() {
while (live_nodes.size() > 0) {
MachNode* m = live_nodes.pop()->isa_Mach();
if (m != NULL) {
if (m != nullptr) {
if (Matcher::is_reg2reg_move(m)) {
// Register allocator properly handles vec <=> leg moves using register masks.
int opnd_idx = m->operand_index(1);
@ -2799,7 +2799,7 @@ bool Matcher::verify_after_postselect_cleanup() {
C->identify_useful_nodes(useful);
for (uint i = 0; i < useful.size(); i++) {
MachNode* m = useful.at(i)->isa_Mach();
if (m != NULL) {
if (m != nullptr) {
assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
for (uint j = 0; j < m->num_opnds(); j++) {
assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
@ -2822,7 +2822,7 @@ bool Matcher::post_store_load_barrier(const Node* vmb) {
const MemBarNode* membar = vmb->as_MemBar();
// Get the Ideal Proj node, ctrl, that can be used to iterate forward
Node* ctrl = NULL;
Node* ctrl = nullptr;
for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
Node* p = membar->fast_out(i);
assert(p->is_Proj(), "only projections here");
@ -2832,7 +2832,7 @@ bool Matcher::post_store_load_barrier(const Node* vmb) {
break;
}
}
assert((ctrl != NULL), "missing control projection");
assert((ctrl != nullptr), "missing control projection");
for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
Node *x = ctrl->fast_out(j);
@ -2905,7 +2905,7 @@ bool Matcher::branches_to_uncommon_trap(const Node *n) {
assert(n->is_If(), "You should only call this on if nodes.");
IfNode *ifn = n->as_If();
Node *ifFalse = NULL;
Node *ifFalse = nullptr;
for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
if (ifn->fast_out(i)->is_IfFalse()) {
ifFalse = ifn->fast_out(i);
@ -2917,9 +2917,9 @@ bool Matcher::branches_to_uncommon_trap(const Node *n) {
Node *reg = ifFalse;
int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
// Alternatively use visited set? Seems too expensive.
while (reg != NULL && cnt > 0) {
CallNode *call = NULL;
RegionNode *nxt_reg = NULL;
while (reg != nullptr && cnt > 0) {
CallNode *call = nullptr;
RegionNode *nxt_reg = nullptr;
for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
Node *o = reg->fast_out(i);
if (o->is_Call()) {
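
The low-bit marking used above, val = (Node*)(((intptr_t)val) | 1), works because Node pointers are word-aligned, leaving bit 0 free to carry a flag; collect_null_checks() and validate_null_checks() both set it to distinguish the special DecodeN case. A minimal standalone sketch of the idiom, with hypothetical names and no HotSpot types:

#include <cstdint>

// Word-aligned pointers always have a zero low bit, so it can carry a flag.
template <typename T> T*   tag_ptr(T* p)   { return (T*)((intptr_t)p | 1); }
template <typename T> bool is_tagged(T* p) { return ((intptr_t)p & 1) != 0; }
template <typename T> T*   untag_ptr(T* p) { return (T*)((intptr_t)p & ~(intptr_t)1); }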

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -149,10 +149,10 @@ private:
// Accessors for the inherited field PhaseTransform::_nodes:
void grow_new_node_array(uint idx_limit) {
_nodes.map(idx_limit-1, NULL);
_nodes.map(idx_limit-1, nullptr);
}
bool has_new_node(const Node* n) const {
return _nodes.at(n->_idx) != NULL;
return _nodes.at(n->_idx) != nullptr;
}
Node* new_node(const Node* n) const {
assert(has_new_node(n), "set before get");
@ -170,7 +170,7 @@ private:
Node* _mem_node; // Ideal memory node consumed by mach node
#endif
// Mach node for ConP #NULL
// Mach node for ConP #null
MachNode* _mach_null;
void handle_precedence_edges(Node* n, MachNode *mach);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,8 +192,8 @@ struct IdealHelper {
const Type* type1 = phase->type(arg1);
const Type* type2 = phase->type(arg2);
if (type1 == NULL || type2 == NULL) {
return NULL;
if (type1 == nullptr || type2 == nullptr) {
return nullptr;
}
if (type1 != Type::TOP && type1->singleton() &&
@ -204,9 +204,9 @@ struct IdealHelper {
Node* con_result = ConINode::make(0);
return con_result;
}
return NULL;
return nullptr;
}
return NULL;
return nullptr;
}
static const Type* Value(const OverflowOp* node, PhaseTransform* phase) {
@ -218,7 +218,7 @@ struct IdealHelper {
const TypeClass* i1 = TypeClass::as_self(t1);
const TypeClass* i2 = TypeClass::as_self(t2);
if (i1 == NULL || i2 == NULL) {
if (i1 == nullptr || i2 == nullptr) {
return TypeInt::CC;
}
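
The IdealHelper above folds an overflow test whose inputs are both constant types (singletons) straight to a constant node (the ConINode::make(0) path). A sketch of the same compile-time decision outside the IR, using a GCC/Clang builtin; the helper name is hypothetical:

#include <cstdint>

// Mirrors folding a constant overflow check down to ConI(0) / ConI(1).
int fold_add_overflow(int64_t a, int64_t b) {
  int64_t r;
  return __builtin_add_overflow(a, b, &r) ? 1 : 0;
}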

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class PhaseCCP;
class PhaseTransform;
//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
// Load or Store, possibly throwing a null pointer exception
class MemNode : public Node {
private:
bool _unaligned_access; // Unaligned access from unsafe
@ -92,7 +92,7 @@ protected:
debug_only(_adr_type=at; adr_type();)
}
virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
ArrayCopyNode* find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, Node* mem) const;
static bool check_if_adr_maybe_raw(Node* adr);
@ -111,10 +111,10 @@ public:
virtual const class TypePtr *adr_type() const; // returns bottom_type of address
// Shared code for Ideal methods:
Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL.
Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null.
// Helper function for adr_type() implementations.
static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr);
// Raw access function, to allow copying of adr_type efficiently in
// product builds and retain the debug info for debug builds.
@ -262,13 +262,13 @@ public:
virtual const Type *bottom_type() const;
// Following method is copied from TypeNode:
void set_type(const Type* t) {
assert(t != NULL, "sanity");
assert(t != nullptr, "sanity");
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
*(const Type**)&_type = t; // cast away const-ness
// If this node is in the hash table, make sure it doesn't need a rehash.
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
}
const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
// Do not match memory edge
virtual uint match_edge(uint idx) const;
@ -808,7 +808,7 @@ public:
virtual const Type *bottom_type() const {return Type::MEMORY;}
virtual const TypePtr *adr_type() const {
Node* ctrl = in(0);
if (ctrl == NULL) return NULL; // node is dead
if (ctrl == nullptr) return nullptr; // node is dead
return ctrl->in(MemNode::Memory)->adr_type();
}
virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
@ -1168,7 +1168,7 @@ public:
// Optional 'precedent' becomes an extra edge if not null.
static MemBarNode* make(Compile* C, int opcode,
int alias_idx = Compile::AliasIdxBot,
Node* precedent = NULL);
Node* precedent = nullptr);
MemBarNode* trailing_membar() const;
MemBarNode* leading_membar() const;
@ -1362,12 +1362,12 @@ public:
intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);
// Capture another store; reformat it to write my internal raw memory.
// Return the captured copy, else NULL if there is some sort of problem.
// Return the captured copy, else null if there is some sort of problem.
Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);
// Find captured store which corresponds to the range [start..start+size).
// Return my own memory projection (meaning the initial zero bits)
// if there is no such store. Return NULL if there is a problem.
// if there is no such store. Return null if there is a problem.
Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
// Called when the associated AllocateNode is expanded into CFG.
@ -1431,7 +1431,7 @@ public:
static Node* make_empty_memory(); // where the sentinel comes from
bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
// hook for the iterator, to perform any necessary setup
void iteration_setup(const MergeMemNode* other = NULL);
void iteration_setup(const MergeMemNode* other = nullptr);
// push sentinels until I am at least as long as the other (semantic no-op)
void grow_to_match(const MergeMemNode* other);
bool verify_sparse() const PRODUCT_RETURN0;
@ -1451,7 +1451,7 @@ class MergeMemStream : public StackObj {
Node* _mem2;
int _cnt2;
void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) {
// subsume_node will break sparseness at times, whenever a memory slice
// folds down to a copy of the base ("fat") memory. In such a case,
// the raw edge will update to base, although it should be top.
@ -1465,15 +1465,15 @@ class MergeMemStream : public StackObj {
//
// Also, iteration_setup repairs sparseness.
assert(mm->verify_sparse(), "please, no dups of base");
assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base");
_mm = mm;
_mm_base = mm->base_memory();
_mm2 = mm2;
_cnt = mm->req();
_idx = Compile::AliasIdxBot-1; // start at the base memory
_mem = NULL;
_mem2 = NULL;
_mem = nullptr;
_mem2 = nullptr;
}
#ifdef ASSERT
@ -1531,7 +1531,7 @@ class MergeMemStream : public StackObj {
return _mm_base;
}
const MergeMemNode* all_memory2() const {
assert(_mm2 != NULL, "");
assert(_mm2 != nullptr, "");
return _mm2;
}
bool at_base_memory() const {
@ -1602,7 +1602,7 @@ class MergeMemStream : public StackObj {
private:
// find the next item, which might be empty
bool next(bool have_mm2) {
assert((_mm2 != NULL) == have_mm2, "use other next");
assert((_mm2 != nullptr) == have_mm2, "use other next");
assert_synch();
if (++_idx < _cnt) {
// Note: This iterator allows _mm to be non-sparse.
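
MergeMemStream above walks the slices of one or two MergeMem nodes in lockstep, with an empty slice standing in for the base ("fat") memory. A heavily simplified sketch of that lockstep-with-default shape over plain vectors; all names are hypothetical, and the real iterator also repairs sparseness:

#include <cstddef>
#include <vector>

// A null slot inherits the base value, the way MergeMem's sentinel
// slices inherit the base memory.
void walk_slice_pairs(const std::vector<int*>& a, const std::vector<int*>& b,
                      int* base_a, int* base_b) {
  std::size_t n = a.size() < b.size() ? b.size() : a.size();
  for (std::size_t i = 0; i < n; i++) {
    int* ma = (i < a.size() && a[i] != nullptr) ? a[i] : base_a;
    int* mb = (i < b.size() && b[i] != nullptr) ? b[i] : base_b;
    (void)ma; (void)mb;  // ... process the slice pair at alias index i ...
  }
}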

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,12 +75,12 @@
// Return a node which is more "ideal" than the current node.
// Move constants to the right.
Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) != NULL && remove_dead_region(phase, can_reshape)) {
if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) {
return this;
}
// Don't bother trying to transform a dead node
if (in(0) != NULL && in(0)->is_top()) {
return NULL;
if (in(0) != nullptr && in(0)->is_top()) {
return nullptr;
}
assert(in(Condition) != this &&
in(IfFalse) != this &&
@ -88,14 +88,14 @@ Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (phase->type(in(Condition)) == Type::TOP ||
phase->type(in(IfFalse)) == Type::TOP ||
phase->type(in(IfTrue)) == Type::TOP) {
return NULL;
return nullptr;
}
// Canonicalize the node by moving constants to the right input.
if (in(Condition)->is_Bool() && phase->type(in(IfFalse))->singleton() && !phase->type(in(IfTrue))->singleton()) {
BoolNode* b = in(Condition)->as_Bool()->negate(phase);
return make(in(Control), phase->transform(b), in(IfTrue), in(IfFalse), _type);
}
return NULL;
return nullptr;
}
//------------------------------is_cmove_id------------------------------------
@ -108,7 +108,7 @@ Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f
// Give up this identity check for floating points because it may choose incorrect
// value around 0.0 and -0.0
if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD )
return NULL;
return nullptr;
// Check for "(t==f)?t:f;" and replace with "f"
if( b->_test._test == BoolTest::eq )
return f;
@ -117,7 +117,7 @@ Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f
if( b->_test._test == BoolTest::ne )
return t;
}
return NULL;
return nullptr;
}
//------------------------------Identity---------------------------------------
@ -180,7 +180,7 @@ CMoveNode *CMoveNode::make(Node *c, Node *bol, Node *left, Node *right, const Ty
case T_NARROWOOP: return new CMoveNNode( c, bol, left, right, t );
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
@ -214,26 +214,26 @@ Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) {
flip = 1 - flip;
} else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) {
} else return NULL;
} else return nullptr;
// Check for eq/ne test
if( !in(1)->is_Bool() ) return NULL;
if( !in(1)->is_Bool() ) return nullptr;
BoolNode *bol = in(1)->as_Bool();
if( bol->_test._test == BoolTest::eq ) {
} else if( bol->_test._test == BoolTest::ne ) {
flip = 1-flip;
} else return NULL;
} else return nullptr;
// Check for vs 0 or 1
if( !bol->in(1)->is_Cmp() ) return NULL;
if( !bol->in(1)->is_Cmp() ) return nullptr;
const CmpNode *cmp = bol->in(1)->as_Cmp();
if( phase->type(cmp->in(2)) == TypeInt::ZERO ) {
} else if( phase->type(cmp->in(2)) == TypeInt::ONE ) {
// Allow cmp-vs-1 if the other input is bounded by 0-1
if( phase->type(cmp->in(1)) != TypeInt::BOOL )
return NULL;
return nullptr;
flip = 1 - flip;
} else return NULL;
} else return nullptr;
// Convert to a bool (flipped)
// Build int->bool conversion
@ -258,7 +258,7 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int phi_x_idx = 0; // Index of phi input where to find naked x
// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
if( !in(1)->is_Bool() ) return nullptr;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
@ -266,13 +266,13 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
default: return nullptr; break;
}
// Find zero input of CmpF; the other input is being abs'd
Node *cmpf = bol->in(1);
if( cmpf->Opcode() != Op_CmpF ) return NULL;
Node *X = NULL;
if( cmpf->Opcode() != Op_CmpF ) return nullptr;
Node *X = nullptr;
bool flip = false;
if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) {
X = cmpf->in(3 - cmp_zero_idx);
@ -281,18 +281,18 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
X = cmpf->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
return nullptr;
}
// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
if( X != in(phi_x_idx) ) return nullptr;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);
// Allow only SubF(0,X) and fail out for all others; NegF is not OK
if( sub->Opcode() != Op_SubF ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;
phase->type(sub->in(1)) != TypeF::ZERO ) return nullptr;
Node *abs = new AbsFNode( X );
if( flip )
@ -314,7 +314,7 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int phi_x_idx = 0; // Index of phi input where to find naked x
// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
if( !in(1)->is_Bool() ) return nullptr;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
@ -322,13 +322,13 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
default: return nullptr; break;
}
// Find zero input of CmpD; the other input is being abs'd
Node *cmpd = bol->in(1);
if( cmpd->Opcode() != Op_CmpD ) return NULL;
Node *X = NULL;
if( cmpd->Opcode() != Op_CmpD ) return nullptr;
Node *X = nullptr;
bool flip = false;
if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) {
X = cmpd->in(3 - cmp_zero_idx);
@ -337,18 +337,18 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
X = cmpd->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
return nullptr;
}
// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
if( X != in(phi_x_idx) ) return nullptr;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);
// Allow only SubD(0,X) and fail out for all others; NegD is not OK
if( sub->Opcode() != Op_SubD ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;
phase->type(sub->in(1)) != TypeD::ZERO ) return nullptr;
Node *abs = new AbsDNode( X );
if( flip )
@ -364,7 +364,7 @@ Node* MoveNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Fold reinterpret cast into memory operation:
// MoveX2Y (LoadX mem) => LoadY mem
LoadNode* ld = in(1)->isa_Load();
if (ld != NULL && (ld->outcnt() == 1)) { // replace only
if (ld != nullptr && (ld->outcnt() == 1)) { // replace only
const Type* rt = bottom_type();
if (ld->has_reinterpret_variant(rt)) {
if (phase->C->post_loop_opts_phase()) {
@ -375,7 +375,7 @@ Node* MoveNode::Ideal(PhaseGVN* phase, bool can_reshape) {
}
}
}
return NULL;
return nullptr;
}
Node* MoveNode::Identity(PhaseGVN* phase) {
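
CMoveFNode::Ideal and CMoveDNode::Ideal above recognize a compare-against-zero select whose two arms are X and SubF(0,X) (deliberately not NegF) and collapse it to an AbsF/AbsD node, negated when the sense is flipped. The source-level idiom being matched looks roughly like this sketch, which is not code from the changeset:

// A select between x and 0.0f - x keyed on a compare with zero is
// what CMoveFNode::Ideal rewrites into a single AbsFNode.
float abs_via_select(float x) {
  return (x < 0.0f) ? 0.0f - x : x;
}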

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class CMoveNode : public TypeNode {
{
init_class_id(Class_CMove);
// all inputs are nullified in Node::Node(int)
// init_req(Control,NULL);
// init_req(Control,nullptr);
init_req(Condition,bol);
init_req(IfFalse,left);
init_req(IfTrue,right);
@ -100,7 +100,7 @@ class CMoveNNode : public CMoveNode {
//
class MoveNode : public Node {
protected:
MoveNode(Node* value) : Node(NULL, value) {
MoveNode(Node* value) : Node(nullptr, value) {
init_class_id(Class_Move);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ Node* MulNode::Identity(PhaseGVN* phase) {
Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* in1 = in(1);
Node* in2 = in(2);
Node* progress = NULL; // Progress flag
Node* progress = nullptr; // Progress flag
// This code is used by And nodes too, but some conversions are
// only valid for the actual Mul nodes.
@ -123,7 +123,7 @@ Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( t2->singleton() && // Right input is a constant?
op != Op_MulF && // Float & double cannot reassociate
op != Op_MulD ) {
if( t2 == Type::TOP ) return NULL;
if( t2 == Type::TOP ) return nullptr;
Node *mul1 = in(1);
#ifdef ASSERT
// Check for dead loop
@ -221,7 +221,7 @@ MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) {
default:
fatal("Not implemented for %s", type2name(bt));
}
return NULL;
return nullptr;
}
@ -239,7 +239,7 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Now we have a constant Node on the right and the constant in con.
if (con == 1) {
// By one is handled by Identity call
return NULL;
return nullptr;
}
// Check for negative constant; if so negate the final result
@ -251,7 +251,7 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
// Get low bit; check for being the only bit
Node *res = NULL;
Node *res = nullptr;
unsigned int bit1 = submultiple_power_of_2(abs_con);
if (bit1 == abs_con) { // Found a power of 2?
res = new LShiftINode(in(1), phase->intcon(log2i_exact(bit1)));
@ -334,7 +334,7 @@ Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Now we have a constant Node on the right and the constant in con.
if (con == 1) {
// By one is handled by Identity call
return NULL;
return nullptr;
}
// Check for negative constant; if so negate the final result
@ -345,7 +345,7 @@ Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
// Get low bit; check for being the only bit
Node *res = NULL;
Node *res = nullptr;
julong bit1 = submultiple_power_of_2(abs_con);
if (bit1 == abs_con) { // Found a power of 2?
res = new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1)));
@ -429,7 +429,7 @@ Node* MulFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const TypeF *t2 = phase->type(in(2))->isa_float_constant();
// x * 2 -> x + x
if (t2 != NULL && t2->getf() == 2) {
if (t2 != nullptr && t2->getf() == 2) {
Node* base = in(1);
return new AddFNode(base, base);
}
@ -452,7 +452,7 @@ Node* MulDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const TypeD *t2 = phase->type(in(2))->isa_double_constant();
// x * 2 -> x + x
if (t2 != NULL && t2->getd() == 2) {
if (t2 != nullptr && t2->getd() == 2) {
Node* base = in(1);
return new AddDNode(base, base);
}
@ -548,7 +548,7 @@ Node* AndINode::Identity(PhaseGVN* phase) {
int con = t2->get_con();
// Masking off high bits which are always zero is useless.
const TypeInt* t1 = phase->type(in(1))->isa_int();
if (t1 != NULL && t1->_lo >= 0) {
if (t1 != nullptr && t1->_lo >= 0) {
jint t1_support = right_n_bits(1 + log2i_graceful(t1->_hi));
if ((t1_support & con) == t1_support)
return in1;
@ -573,7 +573,7 @@ Node* AndINode::Identity(PhaseGVN* phase) {
Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// pattern similar to (v1 + (v2 << 2)) & 3 transformed to v1 & 3
Node* progress = AndIL_add_shift_and_mask(phase, T_INT);
if (progress != NULL) {
if (progress != nullptr) {
return progress;
}
@ -686,7 +686,7 @@ Node* AndLNode::Identity(PhaseGVN* phase) {
jlong con = t2->get_con();
// Masking off high bits which are always zero is useless.
const TypeLong* t1 = phase->type( in(1) )->isa_long();
if (t1 != NULL && t1->_lo >= 0) {
if (t1 != nullptr && t1->_lo >= 0) {
int bit_count = log2i_graceful(t1->_hi) + 1;
jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count));
if ((t1_support & con) == t1_support)
@ -713,7 +713,7 @@ Node* AndLNode::Identity(PhaseGVN* phase) {
Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// pattern similar to (v1 + (v2 << 2)) & 3 transformed to v1 & 3
Node* progress = AndIL_add_shift_and_mask(phase, T_LONG);
if (progress != NULL) {
if (progress != nullptr) {
return progress;
}
@ -765,14 +765,14 @@ LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) {
default:
fatal("Not implemented for %s", type2name(bt));
}
return NULL;
return nullptr;
}
//=============================================================================
static bool const_shift_count(PhaseGVN* phase, Node* shiftNode, int* count) {
const TypeInt* tcount = phase->type(shiftNode->in(2))->isa_int();
if (tcount != NULL && tcount->is_con()) {
if (tcount != nullptr && tcount->is_con()) {
*count = tcount->get_con();
return true;
}
@ -816,7 +816,7 @@ Node* LShiftINode::Identity(PhaseGVN* phase) {
Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = maskShiftAmount(phase, this, BitsPerJavaInteger);
if (con == 0) {
return NULL;
return nullptr;
}
// Left input is an add?
@ -872,7 +872,7 @@ Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
phase->type(add1->in(2)) == TypeInt::make( bits_mask ) )
return new LShiftINode( add1->in(1), in(2) );
return NULL;
return nullptr;
}
//------------------------------Value------------------------------------------
@ -939,7 +939,7 @@ Node* LShiftLNode::Identity(PhaseGVN* phase) {
Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = maskShiftAmount(phase, this, BitsPerJavaLong);
if (con == 0) {
return NULL;
return nullptr;
}
// Left input is an add?
@ -995,7 +995,7 @@ Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
return new LShiftLNode( add1->in(1), in(2) );
return NULL;
return nullptr;
}
//------------------------------Value------------------------------------------
@ -1063,7 +1063,7 @@ Node* RShiftINode::Identity(PhaseGVN* phase) {
int lo = (-1 << (BitsPerJavaInteger - ((uint)count)-1)); // FFFF8000
int hi = ~lo; // 00007FFF
const TypeInt* t11 = phase->type(in(1)->in(1))->isa_int();
if (t11 == NULL) {
if (t11 == nullptr) {
return this;
}
// Does actual value fit inside of mask?
@ -1079,11 +1079,11 @@ Node* RShiftINode::Identity(PhaseGVN* phase) {
Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Inputs may be TOP if they are dead.
const TypeInt *t1 = phase->type(in(1))->isa_int();
if (!t1) return NULL; // Left input is an integer
if (!t1) return nullptr; // Left input is an integer
const TypeInt *t3; // type of in(1).in(2)
int shift = maskShiftAmount(phase, this, BitsPerJavaInteger);
if (shift == 0) {
return NULL;
return nullptr;
}
// Check for (x & 0xFF000000) >> 24, whose mask can be made smaller.
@ -1101,7 +1101,7 @@ Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for "(short[i] <<16)>>16" which simply sign-extends
const Node *shl = in(1);
if( shl->Opcode() != Op_LShiftI ) return NULL;
if( shl->Opcode() != Op_LShiftI ) return nullptr;
if( shift == 16 &&
(t3 = phase->type(shl->in(2))->isa_int()) &&
@ -1137,7 +1137,7 @@ Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
}
return NULL;
return nullptr;
}
//------------------------------Value------------------------------------------
@ -1277,7 +1277,7 @@ Node* URShiftINode::Identity(PhaseGVN* phase) {
t_lshift_count == phase->type(in(2))) {
Node *x = add->in(1)->in(1);
const TypeInt *t_x = phase->type(x)->isa_int();
if (t_x != NULL && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) {
if (t_x != nullptr && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) {
return x;
}
}
@ -1291,7 +1291,7 @@ Node* URShiftINode::Identity(PhaseGVN* phase) {
Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = maskShiftAmount(phase, this, BitsPerJavaInteger);
if (con == 0) {
return NULL;
return nullptr;
}
// We'll be wanting the right-shift amount as a mask of that many bits
@ -1363,7 +1363,7 @@ Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
}
return NULL;
return nullptr;
}
//------------------------------Value------------------------------------------
@ -1455,7 +1455,7 @@ Node* URShiftLNode::Identity(PhaseGVN* phase) {
Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = maskShiftAmount(phase, this, BitsPerJavaLong);
if (con == 0) {
return NULL;
return nullptr;
}
// We'll be wanting the right-shift amount as a mask of that many bits
@ -1508,7 +1508,7 @@ Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return new URShiftLNode(in11, phase->intcon(63));
}
}
return NULL;
return nullptr;
}
//------------------------------Value------------------------------------------
@ -1702,7 +1702,7 @@ Node* RotateLeftNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return new RotateRightNode(in(1), phase->intcon(64 - (lshift & 63)), TypeLong::LONG);
}
}
return NULL;
return nullptr;
}
Node* RotateRightNode::Identity(PhaseGVN* phase) {
@ -1781,27 +1781,27 @@ const Type* RotateRightNode::Value(PhaseGVN* phase) const {
// Because the optimization might work for a non-constant
// mask M, we check the AndX for both operand orders.
bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift, Node* mask, BasicType bt, bool check_reverse) {
if (mask == NULL || shift == NULL) {
if (mask == nullptr || shift == nullptr) {
return false;
}
shift = shift->uncast();
if (shift == NULL) {
if (shift == nullptr) {
return false;
}
const TypeInteger* mask_t = phase->type(mask)->isa_integer(bt);
const TypeInteger* shift_t = phase->type(shift)->isa_integer(bt);
if (mask_t == NULL || shift_t == NULL) {
if (mask_t == nullptr || shift_t == nullptr) {
return false;
}
BasicType shift_bt = bt;
if (bt == T_LONG && shift->Opcode() == Op_ConvI2L) {
bt = T_INT;
Node* val = shift->in(1);
if (val == NULL) {
if (val == nullptr) {
return false;
}
val = val->uncast();
if (val == NULL) {
if (val == nullptr) {
return false;
}
if (val->Opcode() == Op_LShiftI) {
@ -1819,7 +1819,7 @@ bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift,
return false;
}
Node* shift2 = shift->in(2);
if (shift2 == NULL) {
if (shift2 == nullptr) {
return false;
}
const Type* shift2_t = phase->type(shift2);
@ -1852,8 +1852,8 @@ bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift,
Node* MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) {
Node* add = in(1);
Node* mask = in(2);
if (add == NULL || mask == NULL) {
return NULL;
if (add == nullptr || mask == nullptr) {
return nullptr;
}
int addidx = 0;
if (add->Opcode() == Op_Add(bt)) {
@ -1866,7 +1866,7 @@ Node* MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) {
if (addidx > 0) {
Node* add1 = add->in(1);
Node* add2 = add->in(2);
if (add1 != NULL && add2 != NULL) {
if (add1 != nullptr && add2 != nullptr) {
if (AndIL_shift_and_mask_is_always_zero(phase, add1, mask, bt, false)) {
set_req_X(addidx, add2, phase);
return this;
@ -1876,5 +1876,5 @@ Node* MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) {
}
}
}
return NULL;
return nullptr;
}
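
MulINode::Ideal and MulLNode::Ideal above strength-reduce multiplication by a constant: negative constants negate the final result, a power of two becomes a single shift via log2i_exact, and the submultiple_power_of_2 probing suggests constants made of two set bits become two shifts combined with an add. A scalar sketch of that decomposition using GCC/Clang builtins; the helper is hypothetical:

#include <cstdint>

// Strength-reduce x * con for easy constants, as in MulINode::Ideal.
uint32_t mul_by_const(uint32_t x, uint32_t con) {
  if (con == 0) return 0;
  if ((con & (con - 1)) == 0)               // one bit set: x << k
    return x << __builtin_ctz(con);
  uint32_t lo = con & -con;                 // lowest set bit
  uint32_t rest = con - lo;
  if ((rest & (rest - 1)) == 0)             // two bits set: (x<<j) + (x<<k)
    return (x << __builtin_ctz(rest)) + (x << __builtin_ctz(lo));
  return x * con;                           // otherwise keep the multiply
}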

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class PhaseTransform;
class MulNode : public Node {
virtual uint hash() const;
public:
MulNode(Node *in1, Node *in2): Node(NULL,in1,in2) {
MulNode(Node *in1, Node *in2): Node(nullptr,in1,in2) {
init_class_id(Class_Mul);
}
@ -227,7 +227,7 @@ public:
class LShiftNode : public Node {
public:
LShiftNode(Node *in1, Node *in2) : Node(NULL,in1,in2) {
LShiftNode(Node *in1, Node *in2) : Node(nullptr,in1,in2) {
init_class_id(Class_LShift);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,24 +59,24 @@ ProjNode* MultiNode::proj_out_or_null(uint which_proj) const {
continue;
}
}
return NULL;
return nullptr;
}
ProjNode* MultiNode::proj_out_or_null(uint which_proj, bool is_io_use) const {
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
ProjNode* proj = fast_out(i)->isa_Proj();
if (proj != NULL && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) {
if (proj != nullptr && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) {
return proj;
}
}
return NULL;
return nullptr;
}
// Get a named projection
ProjNode* MultiNode::proj_out(uint which_proj) const {
assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || outcnt() == 2, "bad if #1");
ProjNode* p = proj_out_or_null(which_proj);
assert(p != NULL, "named projection %u not found", which_proj);
assert(p != nullptr, "named projection %u not found", which_proj);
return p;
}
@ -113,7 +113,7 @@ const Type* ProjNode::proj_type(const Type* t) const {
}
const Type *ProjNode::bottom_type() const {
if (in(0) == NULL) return Type::TOP;
if (in(0) == nullptr) return Type::TOP;
return proj_type(in(0)->bottom_type());
}
@ -121,16 +121,16 @@ const TypePtr *ProjNode::adr_type() const {
if (bottom_type() == Type::MEMORY) {
// in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM
Node* ctrl = in(0);
if (ctrl == NULL) return NULL; // node is dead
if (ctrl == nullptr) return nullptr; // node is dead
const TypePtr* adr_type = ctrl->adr_type();
#ifdef ASSERT
if (!VMError::is_error_reported() && !Node::in_dump())
assert(adr_type != NULL, "source must have adr_type");
assert(adr_type != nullptr, "source must have adr_type");
#endif
return adr_type;
}
assert(bottom_type()->base() != Type::Memory, "no other memories?");
return NULL;
return nullptr;
}
bool ProjNode::pinned() const { return in(0)->pinned(); }
@ -142,7 +142,7 @@ void ProjNode::dump_compact_spec(outputStream *st) const {
Node* o = this->out(i);
if (not_a_node(o)) {
st->print("[?]");
} else if (o == NULL) {
} else if (o == nullptr) {
st->print("[_]");
} else {
st->print("[%d]", o->_idx);
@ -155,7 +155,7 @@ void ProjNode::dump_compact_spec(outputStream *st) const {
//----------------------------check_con----------------------------------------
void ProjNode::check_con() const {
Node* n = in(0);
if (n == NULL) return; // should be assert, but NodeHash makes bogons
if (n == nullptr) return; // should be assert, but NodeHash makes bogons
if (n->is_Mach()) return; // mach. projs. are not type-safe
if (n->is_Start()) return; // alas, starts can have mach. projs. also
if (_con == SCMemProjNode::SCMEMPROJCON ) return;
@ -166,7 +166,7 @@ void ProjNode::check_con() const {
//------------------------------Value------------------------------------------
const Type* ProjNode::Value(PhaseGVN* phase) const {
if (in(0) == NULL) return Type::TOP;
if (in(0) == nullptr) return Type::TOP;
return proj_type(phase->type(in(0)));
}
@ -183,14 +183,14 @@ uint ProjNode::ideal_reg() const {
//-------------------------------is_uncommon_trap_proj----------------------------
// Return uncommon trap call node if proj is for "proj->[region->..]call_uct"
// NULL otherwise
// null otherwise
CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
int path_limit = 10;
Node* out = this;
for (int ct = 0; ct < path_limit; ct++) {
out = out->unique_ctrl_out_or_null();
if (out == NULL)
return NULL;
if (out == nullptr)
return nullptr;
if (out->is_CallStaticJava()) {
CallStaticJavaNode* call = out->as_CallStaticJava();
int req = call->uncommon_trap_request();
@ -200,12 +200,12 @@ CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason
return call;
}
}
return NULL; // don't do further after call
return nullptr; // don't do further after call
}
if (out->Opcode() != Op_Region)
return NULL;
return nullptr;
}
return NULL;
return nullptr;
}
//-------------------------------is_uncommon_trap_if_pattern-------------------------
@ -213,31 +213,31 @@ CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason
// |
// V
// other_proj->[region->..]call_uct"
// NULL otherwise
// null otherwise
// "must_reason_predicate" means the uct reason must be Reason_predicate
CallStaticJavaNode* ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
Node *in0 = in(0);
if (!in0->is_If()) return NULL;
if (!in0->is_If()) return nullptr;
// Variation of a dead If node.
if (in0->outcnt() < 2) return NULL;
if (in0->outcnt() < 2) return nullptr;
IfNode* iff = in0->as_If();
// we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
if (reason != Deoptimization::Reason_none) {
if (iff->in(1)->Opcode() != Op_Conv2B ||
iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
return NULL;
return nullptr;
}
}
ProjNode* other_proj = iff->proj_out(1-_con);
CallStaticJavaNode* call = other_proj->is_uncommon_trap_proj(reason);
if (call != NULL) {
if (call != nullptr) {
assert(reason == Deoptimization::Reason_none ||
Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
return call;
}
return NULL;
return nullptr;
}
ProjNode* ProjNode::other_if_proj() const {
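
is_uncommon_trap_proj above is a budgeted walk: it follows unique control successors for at most path_limit (10) hops, stepping through Region nodes, and stops at the first CallStaticJava whose uncommon-trap request matches. The shape of such a bounded walk, sketched over a toy node type with hypothetical names:

// Follow unique successors for at most 'limit' hops; return the first
// node flagged as a target, or nullptr once the budget runs out.
struct TNode { TNode* succ; bool is_target; };

TNode* bounded_find(TNode* start, int limit) {
  TNode* cur = start;
  for (int hop = 0; hop < limit; hop++) {
    cur = cur->succ;
    if (cur == nullptr) return nullptr;  // fell off the graph
    if (cur->is_target) return cur;
  }
  return nullptr;                        // not found within the budget
}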

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -92,13 +92,13 @@ public:
#endif
// Return uncommon trap call node if proj is for "proj->[region->..]call_uct"
// NULL otherwise
// null otherwise
CallStaticJavaNode* is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
// Return uncommon trap call node for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
// NULL otherwise
// null otherwise
CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
// Return other proj node when this is a If proj node

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ class EncodeNarrowPtrNode : public TypeNode {
EncodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_EncodeNarrowPtr);
init_req(0, NULL);
init_req(0, nullptr);
init_req(1, value);
}
public:
@ -77,7 +77,7 @@ class DecodeNarrowPtrNode : public TypeNode {
DecodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_DecodeNarrowPtr);
init_req(0, NULL);
init_req(0, nullptr);
init_req(1, value);
}
public:

View File

@ -67,7 +67,7 @@ extern int nodes_created;
//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
void Node::verify_construction() {
_debug_orig = NULL;
_debug_orig = nullptr;
int old_debug_idx = Compile::debug_idx();
int new_debug_idx = old_debug_idx + 1;
if (new_debug_idx > 0) {
@ -95,7 +95,7 @@ void Node::verify_construction() {
BREAKPOINT;
}
#if OPTO_DU_ITERATOR_ASSERT
_last_del = NULL;
_last_del = nullptr;
_del_tick = 0;
#endif
_hash_lock = 0;
@ -110,7 +110,7 @@ void DUIterator_Common::sample(const Node* node) {
_node = node;
_outcnt = node->_outcnt;
_del_tick = node->_del_tick;
_last = NULL;
_last = nullptr;
}
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
@ -291,7 +291,7 @@ void DUIterator_Last::verify_step(uint num_edges) {
// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
// The value null is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)
// Out-of-line code from node constructors.
@ -313,7 +313,7 @@ inline int Node::Init(int req) {
}
// If there are default notes floating around, capture them:
Node_Notes* nn = C->default_node_notes();
if (nn != NULL) init_node_notes(C, idx, nn);
if (nn != nullptr) init_node_notes(C, idx, nn);
// Note: At this point, C is dead,
// and we begin to initialize the new Node.
@ -338,11 +338,11 @@ Node::Node(uint req)
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
if (req == 0) {
_in = NULL;
_in = nullptr;
} else {
Node** to = _in;
for(uint i = 0; i < req; i++) {
to[i] = NULL;
to[i] = nullptr;
}
}
}
@ -357,7 +357,7 @@ Node::Node(Node *n0)
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -371,8 +371,8 @@ Node::Node(Node *n0, Node *n1)
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -387,9 +387,9 @@ Node::Node(Node *n0, Node *n1, Node *n2)
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -405,10 +405,10 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -425,11 +425,11 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
assert( is_not_dead(n4), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -448,12 +448,12 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
assert( is_not_dead(n3), "can not use dead node");
assert( is_not_dead(n4), "can not use dead node");
assert( is_not_dead(n5), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
@ -473,13 +473,13 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
assert( is_not_dead(n4), "can not use dead node");
assert( is_not_dead(n5), "can not use dead node");
assert( is_not_dead(n6), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
_in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this);
_in[6] = n6; if (n6 != nullptr) n6->add_out((Node *)this);
}
#ifdef __clang__
@ -508,7 +508,7 @@ Node *Node::clone() const {
for( i = 0; i < len(); i++ ) {
Node *x = in(i);
n->_in[i] = x;
if (x != NULL) x->add_out(n);
if (x != nullptr) x->add_out(n);
}
if (is_macro()) {
C->add_macro_node(n);
@ -557,7 +557,7 @@ Node *Node::clone() const {
if (n->is_Call()) {
// CallGenerator is linked to the original node.
CallGenerator* cg = n->as_Call()->generator();
if (cg != NULL) {
if (cg != nullptr) {
CallGenerator* cloned_cg = cg->with_call_node(n->as_Call());
n->as_Call()->set_generator(cloned_cg);
@ -583,10 +583,10 @@ void Node::setup_is_top() {
if (this == (Node*)Compile::current()->top()) {
// This node has just become top. Kill its out array.
_outcnt = _outmax = 0;
_out = NULL; // marker value for top
_out = nullptr; // marker value for top
assert(is_top(), "must be top");
} else {
if (_out == NULL) _out = NO_OUT_ARRAY;
if (_out == nullptr) _out = NO_OUT_ARRAY;
assert(!is_top(), "must not be top");
}
}
@ -594,8 +594,8 @@ void Node::setup_is_top() {
//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
void Node::destruct(PhaseValues* phase) {
Compile* compile = (phase != NULL) ? phase->C : Compile::current();
if (phase != NULL && phase->is_IterGVN()) {
Compile* compile = (phase != nullptr) ? phase->C : Compile::current();
if (phase != nullptr && phase->is_IterGVN()) {
phase->is_IterGVN()->_worklist.remove(this);
}
// If this is the most recently created node, reclaim its index. Otherwise,
@ -607,12 +607,12 @@ void Node::destruct(PhaseValues* phase) {
}
// Clear debug info:
Node_Notes* nn = compile->node_notes_at(_idx);
if (nn != NULL) nn->clear();
if (nn != nullptr) nn->clear();
// Walk the input array, freeing the corresponding output edges
_cnt = _max; // forget req/prec distinction
uint i;
for( i = 0; i < _max; i++ ) {
set_req(i, NULL);
set_req(i, nullptr);
//assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
}
assert(outcnt() == 0, "deleting a node must not leave a dangling use");
@ -645,7 +645,7 @@ void Node::destruct(PhaseValues* phase) {
int out_edge_size = _outmax*sizeof(void*);
char *in_array = ((char*)_in);
char *edge_end = in_array + edge_size;
char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
char *out_array = (char*)(_out == NO_OUT_ARRAY? nullptr: _out);
int node_size = size_of();
#ifdef ASSERT
@ -687,10 +687,10 @@ void Node::grow(uint len) {
_max = 4;
_in = (Node**)arena->Amalloc(4*sizeof(Node*));
Node** to = _in;
to[0] = NULL;
to[1] = NULL;
to[2] = NULL;
to[3] = NULL;
to[0] = nullptr;
to[1] = nullptr;
to[2] = nullptr;
to[3] = nullptr;
return;
}
new_max = next_power_of_2(len);
@ -698,7 +698,7 @@ void Node::grow(uint len) {
// Previously I was using only powers-of-2 which peaked at 128 edges.
//if( new_max >= limit ) new_max = limit-1;
_in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // null all new space
_max = new_max; // Record new max length
// This assertion makes sure that Node::_max is wide enough to
// represent the numerical value of new_max.
@ -720,9 +720,9 @@ void Node::out_grow( uint len ) {
// Trimming to limit allows a uint8 to handle up to 255 edges.
// Previously I was using only powers-of-2 which peaked at 128 edges.
//if( new_max >= limit ) new_max = limit-1;
assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
assert(_out != nullptr && _out != NO_OUT_ARRAY, "out must have sensible value");
_out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
//Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
//Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // null all new space
_outmax = new_max; // Record new max length
// This assertion makes sure that Node::_max is wide enough to
// represent the numerical value of new_max.
@ -736,7 +736,7 @@ bool Node::is_dead() const {
if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
return false;
for( uint i = 0; i < _max; i++ )
if( _in[i] != NULL )
if( _in[i] != nullptr )
return false;
return true;
}
@ -767,7 +767,7 @@ bool Node::is_reachable_from_root() const {
//------------------------------is_unreachable---------------------------------
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
assert(!is_Mach(), "doesn't work with MachNodes");
return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != nullptr && in(0)->is_top());
}
//------------------------------add_req----------------------------------------
@ -776,19 +776,19 @@ void Node::add_req( Node *n ) {
assert( is_not_dead(n), "can not use dead node");
// Look to see if I can move precedence down one without reallocating
if( (_cnt >= _max) || (in(_max-1) != NULL) )
if( (_cnt >= _max) || (in(_max-1) != nullptr) )
grow( _max+1 );
// Find a precedence edge to move
if( in(_cnt) != NULL ) { // Next precedence edge is busy?
if( in(_cnt) != nullptr ) { // Next precedence edge is busy?
uint i;
for( i=_cnt; i<_max; i++ )
if( in(i) == NULL ) // Find the NULL at end of prec edge list
if( in(i) == nullptr ) // Find the null at end of prec edge list
break; // There must be one, since we grew the array
_in[i] = in(_cnt); // Move prec over, making space for req edge
}
_in[_cnt++] = n; // Stuff over old prec edge
if (n != NULL) n->add_out((Node *)this);
if (n != nullptr) n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
}
@ -808,10 +808,10 @@ void Node::add_req_batch( Node *n, uint m ) {
grow( _max+m );
// Find a precedence edge to move
if( _in[_cnt] != NULL ) { // Next precedence edge is busy?
if( _in[_cnt] != nullptr ) { // Next precedence edge is busy?
uint i;
for( i=_cnt; i<_max; i++ )
if( _in[i] == NULL ) // Find the NULL at end of prec edge list
if( _in[i] == nullptr ) // Find the null at end of prec edge list
break; // There must be one, since we grew the array
// Slide all the precs over by m positions (assume #prec << m).
Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
@ -823,7 +823,7 @@ void Node::add_req_batch( Node *n, uint m ) {
}
// Insert multiple out edges on the node.
if (n != NULL && !n->is_top()) {
if (n != nullptr && !n->is_top()) {
for(uint i=0; i<m; i++ ) {
n->add_out((Node *)this);
}
@ -839,7 +839,7 @@ void Node::del_req( uint idx ) {
"remove node from hash table before modifying it");
// First remove corresponding def-use edge
Node *n = in(idx);
if (n != NULL) n->del_out((Node *)this);
if (n != nullptr) n->del_out((Node *)this);
_in[idx] = in(--_cnt); // Compact the array
// Avoid spec violation: Gap in prec edges.
close_prec_gap_at(_cnt);
@ -854,7 +854,7 @@ void Node::del_req_ordered( uint idx ) {
"remove node from hash table before modifying it");
// First remove corresponding def-use edge
Node *n = in(idx);
if (n != NULL) n->del_out((Node *)this);
if (n != nullptr) n->del_out((Node *)this);
if (idx < --_cnt) { // Not last edge ?
Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
}
@ -867,14 +867,14 @@ void Node::del_req_ordered( uint idx ) {
// Insert a new required input at the end
void Node::ins_req( uint idx, Node *n ) {
assert( is_not_dead(n), "can not use dead node");
add_req(NULL); // Make space
add_req(nullptr); // Make space
assert( idx < _max, "Must have allocated enough space");
// Slide over
if(_cnt-idx-1 > 0) {
Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
}
_in[idx] = n; // Stuff over old required edge
if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
if (n != nullptr) n->add_out((Node *)this); // Add reciprocal def-use edge
Compile::current()->record_modified_node(this);
}
@ -893,13 +893,13 @@ int Node::replace_edge(Node* old, Node* neww, PhaseGVN* gvn) {
for (uint i = 0; i < len(); i++) {
if (in(i) == old) {
if (i < req()) {
if (gvn != NULL) {
if (gvn != nullptr) {
set_req_X(i, neww, gvn);
} else {
set_req(i, neww);
}
} else {
assert(gvn == NULL || gvn->is_IterGVN() == NULL, "no support for igvn here");
assert(gvn == nullptr || gvn->is_IterGVN() == nullptr, "no support for igvn here");
assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
set_prec(i, neww);
}
@ -925,7 +925,7 @@ int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end, Phas
}
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// null out all inputs to eliminate incoming Def-Use edges.
void Node::disconnect_inputs(Compile* C) {
// the layout of Node::_in
// r: a required input, null is allowed
@ -944,7 +944,7 @@ void Node::disconnect_inputs(Compile* C) {
// Remove precedence edges if any exist
// Note: Safepoints may have precedence edges, even during parsing
for (uint i = len(); i > req(); ) {
rm_prec(--i); // no-op if _in[i] is nullptr
rm_prec(--i); // no-op if _in[i] is null
}
#ifdef ASSERT
@ -981,12 +981,12 @@ Node* Node::find_out_with(int opcode) {
return use;
}
}
return NULL;
return nullptr;
}
// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
return (find_out_with(opcode) != NULL);
return (find_out_with(opcode) != nullptr);
}
// Return true if the current node has an out that matches any of the opcodes.
@ -1017,7 +1017,7 @@ Node* Node::uncast_helper(const Node* p, bool keep_deps) {
}
assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
if (p == NULL || p->req() != 2) {
if (p == nullptr || p->req() != 2) {
break;
} else if (p->is_ConstraintCast()) {
if (keep_deps && p->as_ConstraintCast()->carry_dependency()) {
@ -1033,36 +1033,36 @@ Node* Node::uncast_helper(const Node* p, bool keep_deps) {
//------------------------------add_prec---------------------------------------
// Add a new precedence input. Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
// duplicates removed and nulls packed down at the end.
void Node::add_prec( Node *n ) {
assert( is_not_dead(n), "can not use dead node");
// Check for NULL at end
// Check for null at end
if( _cnt >= _max || in(_max-1) )
grow( _max+1 );
// Find a precedence edge to move
uint i = _cnt;
while( in(i) != NULL ) {
while( in(i) != nullptr ) {
if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
i++;
}
_in[i] = n; // Stuff prec edge over NULL
if ( n != NULL) n->add_out((Node *)this); // Add mirror edge
_in[i] = n; // Stuff prec edge over null
if ( n != nullptr) n->add_out((Node *)this); // Add mirror edge
#ifdef ASSERT
while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); }
while ((++i)<_max) { assert(_in[i] == nullptr, "spec violation: Gap in prec edges (node %d)", _idx); }
#endif
Compile::current()->record_modified_node(this);
}
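
To make the invariant in the comment concrete, here is a hedged, self-contained sketch of a prec-edge region that stays packed and duplicate-free. ToyPrec, its fixed _cnt/_max, and the gap-closing loop are illustrative stand-ins for add_prec/rm_prec (and close_prec_gap_at in the header further down), not the real implementation.

```cpp
#include <cassert>
#include <cstdio>

// Toy model of the precedence-edge region [_cnt, _max): unordered, no
// duplicates, nulls packed at the end.
struct ToyPrec {
    static const unsigned _cnt = 2;   // required edges live in [0, _cnt)
    static const unsigned _max = 6;   // prec edges live in [_cnt, _max)
    void* _in[_max] = {};

    void add_prec(void* n) {
        unsigned i = _cnt;
        while (_in[i] != nullptr) {   // find first free slot
            if (_in[i] == n) return;  // duplicate prec edge: keep invariant
            i++;
            assert(i < _max && "prec region full; the real code would grow");
        }
        _in[i] = n;
    }

    void rm_prec(unsigned j) {        // remove and close the gap
        assert(j >= _cnt && j < _max);
        if (_in[j] == nullptr) return;
        unsigned last = j;
        while (last + 1 < _max && _in[last + 1] != nullptr) last++;
        _in[j] = _in[last];           // move last live prec edge into the gap
        _in[last] = nullptr;
    }
};

int main() {
    int a, b, c;
    ToyPrec p;
    p.add_prec(&a); p.add_prec(&b); p.add_prec(&a);  // duplicate ignored
    p.add_prec(&c);
    p.rm_prec(ToyPrec::_cnt);                        // gap closed: &c moves down
    for (unsigned i = ToyPrec::_cnt; i < ToyPrec::_max; i++)
        std::printf("slot %u: %s\n", i, p._in[i] ? "edge" : "null");
    return 0;
}
```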
//------------------------------rm_prec----------------------------------------
// Remove a precedence input. Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
// duplicates removed and nulls packed down at the end.
void Node::rm_prec( uint j ) {
assert(j < _max, "oob: i=%d, _max=%d", j, _max);
assert(j >= _cnt, "not a precedence edge");
if (_in[j] == NULL) return; // Avoid spec violation: Gap in prec edges.
if (_in[j] == nullptr) return; // Avoid spec violation: Gap in prec edges.
_in[j]->del_out((Node *)this);
close_prec_gap_at(j);
Compile::current()->record_modified_node(this);
@ -1075,12 +1075,12 @@ uint Node::size_of() const { return sizeof(*this); }
uint Node::ideal_reg() const { return 0; }
//------------------------------jvms-------------------------------------------
JVMState* Node::jvms() const { return NULL; }
JVMState* Node::jvms() const { return nullptr; }
#ifdef ASSERT
//------------------------------jvms-------------------------------------------
bool Node::verify_jvms(const JVMState* using_jvms) const {
for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
for (JVMState* jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) {
if (jvms == using_jvms) return true;
}
return false;
@ -1160,13 +1160,13 @@ const Type* Node::Value(PhaseGVN* phase) const {
// pointer. If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node. Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
// return the 'this' pointer instead of null.
//
// You cannot return an OLD Node, except for the 'this' pointer. Use the
// Identity call to return an old Node; basically, if Identity can find
// another Node, have the Ideal call make no change and return null.
// another Node have the Ideal call make no change and return null.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
// returns null instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer. Due to
// sharing there may be other users of the old Nodes relying on their current
@ -1201,7 +1201,7 @@ const Type* Node::Value(PhaseGVN* phase) const {
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL; // Default to being Ideal already
return nullptr; // Default to being Ideal already
}
// Some nodes have specific Ideal subgraph transformations only if they are
@ -1237,17 +1237,17 @@ bool Node::has_special_unique_user() const {
//--------------------------find_exact_control---------------------------------
// Skip chains of Proj and CatchProj nodes. Check for null and Top.
Node* Node::find_exact_control(Node* ctrl) {
if (ctrl == NULL && this->is_Region())
if (ctrl == nullptr && this->is_Region())
ctrl = this->as_Region()->is_copy();
if (ctrl != NULL && ctrl->is_CatchProj()) {
if (ctrl != nullptr && ctrl->is_CatchProj()) {
if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
ctrl = ctrl->in(0);
if (ctrl != NULL && !ctrl->is_top())
if (ctrl != nullptr && !ctrl->is_top())
ctrl = ctrl->in(0);
}
if (ctrl != NULL && ctrl->is_Proj())
if (ctrl != nullptr && ctrl->is_Proj())
ctrl = ctrl->in(0);
return ctrl;
@ -1261,7 +1261,7 @@ Node* Node::find_exact_control(Node* ctrl) {
// not an exhaustive search for a counterexample.
bool Node::dominates(Node* sub, Node_List &nlist) {
assert(this->is_CFG(), "expecting control");
assert(sub != NULL && sub->is_CFG(), "expecting control");
assert(sub != nullptr && sub->is_CFG(), "expecting control");
// detect dead cycle without regions
int iterations_without_region_limit = DominatorSearchLimit;
@ -1278,7 +1278,7 @@ bool Node::dominates(Node* sub, Node_List &nlist) {
// same region again, go through a different input. Eventually we
// will either exit through the loop head, or give up.
// (If we get confused, break out and return a conservative 'false'.)
while (sub != NULL) {
while (sub != nullptr) {
if (sub->is_top()) break; // Conservative answer for dead code.
if (sub == dom) {
if (nlist.size() == 0) {
@ -1345,7 +1345,7 @@ bool Node::dominates(Node* sub, Node_List &nlist) {
uint skip = region_was_visited_before ? 1 : 0;
for (uint i = 1; i < sub->req(); i++) {
Node* in = sub->in(i);
if (in != NULL && !in->is_top() && in != sub) {
if (in != nullptr && !in->is_top() && in != sub) {
if (skip == 0) {
up = in;
break;
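
As a rough intuition for the walk above, the sketch below follows a single control input upward from sub until it hits dom or runs out of graph. The real dominates() additionally handles Region nodes with several inputs, revisits, and the DominatorSearchLimit cap, none of which this toy attempts.

```cpp
#include <cstdio>

// Greatly simplified dominance walk: step up the control chain from 'sub'
// and see whether 'dom' appears before the chain ends.
struct CfgNode {
    CfgNode* in0 = nullptr;   // single control predecessor
};

bool dominates(const CfgNode* dom, const CfgNode* sub, int limit = 100) {
    while (sub != nullptr && limit-- > 0) {
        if (sub == dom) return true;  // found dominator on the control chain
        sub = sub->in0;               // step up toward the root
    }
    return false;                     // conservative answer: not proven
}

int main() {
    CfgNode root, a, b;
    a.in0 = &root;
    b.in0 = &a;
    std::printf("%d %d\n", dominates(&root, &b), dominates(&b, &a));  // 1 0
    return 0;
}
```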
@ -1426,7 +1426,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
Node* in = use->in(j);
if (in == dead) { // Turn all dead inputs into TOP
use->set_req(j, top);
} else if (in != NULL && !in->is_top()) {
} else if (in != nullptr && !in->is_top()) {
dead_use = false;
}
}
@ -1450,7 +1450,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
// Kill all inputs to the dead guy
for (uint i=0; i < dead->req(); i++) {
Node *n = dead->in(i); // Get input to dead guy
if (n != NULL && !n->is_top()) { // Input is valid?
if (n != nullptr && !n->is_top()) { // Input is valid?
dead->set_req(i, top); // Smash input away
if (n->outcnt() == 0) { // Input also goes dead?
if (!n->is_Con())
@ -1499,7 +1499,7 @@ bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
uint Node::hash() const {
uint sum = 0;
for( uint i=0; i<_cnt; i++ ) // Add in all inputs
sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs
sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded nulls
return (sum>>2) + _cnt + Opcode();
}
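
The same mixing arithmetic, lifted out for inspection: each input folds in by shift-and-subtract, so a null slot contributes nothing beyond the shift, which is what the "Ignore embedded nulls" comment refers to. toy_hash is an illustrative stand-in, not the HotSpot function.

```cpp
#include <cstdint>
#include <cstdio>

// Stand-alone version of the input mix in Node::hash(): fold each input
// pointer in via shift-and-subtract; a null slot folds in as 0.
uint32_t toy_hash(void* const* in, unsigned cnt, int opcode) {
    uintptr_t sum = 0;
    for (unsigned i = 0; i < cnt; i++) {
        sum = (sum << 1) - (uintptr_t)in[i];
    }
    return (uint32_t)(sum >> 2) + cnt + opcode;
}

int main() {
    int a, b;
    void* in[3] = { &a, nullptr, &b };   // a required slot may legally be null
    std::printf("hash=%u\n", toy_hash(in, 3, 42));
    return 0;
}
```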
@ -1536,7 +1536,7 @@ const TypeInt* Node::find_int_type() const {
assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
return this->bottom_type()->isa_int();
}
return NULL;
return nullptr;
}
const TypeInteger* Node::find_integer_type(BasicType bt) const {
@ -1546,7 +1546,7 @@ const TypeInteger* Node::find_integer_type(BasicType bt) const {
assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
return this->bottom_type()->isa_integer(bt);
}
return NULL;
return nullptr;
}
// Get a pointer constant from a ConstNode.
@ -1571,7 +1571,7 @@ const TypeLong* Node::find_long_type() const {
assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
return this->bottom_type()->isa_long();
}
return NULL;
return nullptr;
}
@ -1581,9 +1581,9 @@ const TypeLong* Node::find_long_type() const {
const TypePtr* Node::get_ptr_type() const {
const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
if (tp == NULL) {
if (tp == nullptr) {
this->dump(1);
assert((tp != NULL), "unexpected node type");
assert((tp != nullptr), "unexpected node type");
}
#endif
return tp;
@ -1766,8 +1766,8 @@ Node* Node::find_ctrl(int idx) {
//------------------------------find-------------------------------------------
// Tries to find the node with the index |idx| starting from this node. If idx is negative,
// the search also includes forward (out) edges. Returns NULL if not found.
// If only_ctrl is set, the search will only be done on control nodes. Returns NULL if
// the search also includes forward (out) edges. Returns null if not found.
// If only_ctrl is set, the search will only be done on control nodes. Returns null if
// not found or if the node to be found is not a control node (search will not find it).
Node* Node::find(const int idx, bool only_ctrl) {
ResourceMark rm;
@ -2070,10 +2070,10 @@ void PrintBFS::print_options_help(bool print_examples) {
tty->print("Arguments:\n");
tty->print(" this/start: staring point of BFS\n");
tty->print(" target:\n");
tty->print(" if nullptr: simple BFS\n");
tty->print(" if null: simple BFS\n");
tty->print(" else: shortest path or all paths between this/start and target\n");
tty->print(" options:\n");
tty->print(" if nullptr: same as \"cdmox@B\"\n");
tty->print(" if null: same as \"cdmox@B\"\n");
tty->print(" else: use combination of following characters\n");
tty->print(" h: display this help info\n");
tty->print(" H: display this help info, with examples\n");
@ -2447,7 +2447,7 @@ const char *Node::Name() const { return NodeClassNames[Opcode()]; }
static bool is_disconnected(const Node* n) {
for (uint i = 0; i < n->req(); i++) {
if (n->in(i) != NULL) return false;
if (n->in(i) != nullptr) return false;
}
return true;
}
@ -2456,15 +2456,15 @@ static bool is_disconnected(const Node* n) {
void Node::dump_orig(outputStream *st, bool print_key) const {
Compile* C = Compile::current();
Node* orig = _debug_orig;
if (not_a_node(orig)) orig = NULL;
if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
if (orig == NULL) return;
if (not_a_node(orig)) orig = nullptr;
if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr;
if (orig == nullptr) return;
if (print_key) {
st->print(" !orig=");
}
Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
if (not_a_node(fast)) fast = NULL;
while (orig != NULL) {
if (not_a_node(fast)) fast = nullptr;
while (orig != nullptr) {
bool discon = is_disconnected(orig); // if discon, print [123] else 123
if (discon) st->print("[");
if (!Compile::current()->node_arena()->contains(orig))
@ -2472,16 +2472,16 @@ void Node::dump_orig(outputStream *st, bool print_key) const {
st->print("%d", orig->_idx);
if (discon) st->print("]");
orig = orig->debug_orig();
if (not_a_node(orig)) orig = NULL;
if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
if (orig != NULL) st->print(",");
if (fast != NULL) {
if (not_a_node(orig)) orig = nullptr;
if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr;
if (orig != nullptr) st->print(",");
if (fast != nullptr) {
// Step fast twice for each single step of orig:
fast = fast->debug_orig();
if (not_a_node(fast)) fast = NULL;
if (fast != NULL && fast != orig) {
if (not_a_node(fast)) fast = nullptr;
if (fast != nullptr && fast != orig) {
fast = fast->debug_orig();
if (not_a_node(fast)) fast = NULL;
if (not_a_node(fast)) fast = nullptr;
}
if (fast == orig) {
st->print("...");
@ -2494,16 +2494,16 @@ void Node::dump_orig(outputStream *st, bool print_key) const {
void Node::set_debug_orig(Node* orig) {
_debug_orig = orig;
if (BreakAtNode == 0) return;
if (not_a_node(orig)) orig = NULL;
if (not_a_node(orig)) orig = nullptr;
int trip = 10;
while (orig != NULL) {
while (orig != nullptr) {
if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
BREAKPOINT;
}
orig = orig->debug_orig();
if (not_a_node(orig)) orig = NULL;
if (not_a_node(orig)) orig = nullptr;
if (trip-- <= 0) break;
}
}
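
Both dump_orig (with its tortoise & hare) and set_debug_orig (with a trip counter) guard against cycles in the debug_orig chain. Here is the tortoise & hare idea in isolation on a toy linked list; Link and has_cycle are assumptions made for the sketch.

```cpp
#include <cstdio>

// Floyd's "tortoise & hare": advance one pointer a single step and the
// other two steps; they can only meet again if the chain loops.
struct Link { Link* next = nullptr; };

bool has_cycle(Link* head) {
    Link* slow = head;
    Link* fast = head;
    while (fast != nullptr && fast->next != nullptr) {
        slow = slow->next;              // one step
        fast = fast->next->next;        // two steps
        if (slow == fast) return true;  // met inside a loop
    }
    return false;                       // ran off the end: acyclic
}

int main() {
    Link a, b, c;
    a.next = &b; b.next = &c;
    std::printf("acyclic: %d\n", has_cycle(&a));   // 0
    c.next = &b;                                   // close a loop b -> c -> b
    std::printf("cyclic:  %d\n", has_cycle(&a));   // 1
    return 0;
}
```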
@ -2552,7 +2552,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
const Type *t = bottom_type();
if (t != NULL && (t->isa_instptr() || t->isa_instklassptr())) {
if (t != nullptr && (t->isa_instptr() || t->isa_instklassptr())) {
const TypeInstPtr *toop = t->isa_instptr();
const TypeInstKlassPtr *tkls = t->isa_instklassptr();
if (toop) {
@ -2578,8 +2578,8 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
if (is_new) {
DEBUG_ONLY(dump_orig(st));
Node_Notes* nn = C->node_notes_at(_idx);
if (nn != NULL && !nn->is_clear()) {
if (nn->jvms() != NULL) {
if (nn != nullptr && !nn->is_clear()) {
if (nn->jvms() != nullptr) {
st->print(" !jvms:");
nn->jvms()->dump_spec(st);
}
@ -2599,7 +2599,7 @@ void Node::dump_req(outputStream* st, DumpConfig* dc) const {
// Dump the required input edges
for (uint i = 0; i < req(); i++) { // For all required inputs
Node* d = in(i);
if (d == NULL) {
if (d == nullptr) {
st->print("_ ");
} else if (not_a_node(d)) {
st->print("not_a_node "); // uninitialized, sentinel, garbage, etc.
@ -2617,7 +2617,7 @@ void Node::dump_prec(outputStream* st, DumpConfig* dc) const {
int any_prec = 0;
for (uint i = req(); i < len(); i++) { // For all precedence inputs
Node* p = in(i);
if (p != NULL) {
if (p != nullptr) {
if (!any_prec++) st->print(" |");
if (not_a_node(p)) { st->print("not_a_node "); continue; }
p->dump_idx(false, st, dc);
@ -2633,7 +2633,7 @@ void Node::dump_out(outputStream* st, DumpConfig* dc) const {
// Dump the output edges
for (uint i = 0; i < _outcnt; i++) { // For all outputs
Node* u = _out[i];
if (u == NULL) {
if (u == nullptr) {
st->print("_ ");
} else if (not_a_node(u)) {
st->print("not_a_node ");
@ -2691,7 +2691,7 @@ void Node::verify(int verify_depth, VectorSet& visited, Node_List& worklist) {
Node* n = worklist[list_index];
if (n->is_Con() && n->bottom_type() == Type::TOP) {
if (C->cached_top_node() == NULL) {
if (C->cached_top_node() == nullptr) {
C->set_cached_top_node((Node*)n);
}
assert(C->cached_top_node() == n, "TOP node must be unique");
@ -2787,14 +2787,14 @@ void Node_Array::insert(uint i, Node* n) {
void Node_Array::remove(uint i) {
Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i + 1], (HeapWord*)&_nodes[i], ((_max - i - 1) * sizeof(Node*)));
_nodes[_max - 1] = NULL;
_nodes[_max - 1] = nullptr;
}
void Node_Array::dump() const {
#ifndef PRODUCT
for (uint i = 0; i < _max; i++) {
Node* nn = _nodes[i];
if (nn != NULL) {
if (nn != nullptr) {
tty->print("%5d--> ",i); nn->dump();
}
}
@ -2809,7 +2809,7 @@ bool Node::is_iteratively_computed() {
if (ideal_reg()) { // does operation have a result register?
for (uint i = 1; i < req(); i++) {
Node* n = in(i);
if (n != NULL && n->is_Phi()) {
if (n != nullptr && n->is_Phi()) {
for (uint j = 1; j < n->req(); j++) {
if (n->in(j) == this) {
return true;
@ -2823,7 +2823,7 @@ bool Node::is_iteratively_computed() {
//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; otherwise return NULL.
// be found; otherwise return null.
Node* Node::find_similar(int opc) {
if (req() >= 2) {
Node* def = in(1);
@ -2846,19 +2846,19 @@ Node* Node::find_similar(int opc) {
}
}
}
return NULL;
return nullptr;
}
//--------------------------unique_ctrl_out_or_null-------------------------
// Return the unique control out if only one. Null if none or more than one.
Node* Node::unique_ctrl_out_or_null() const {
Node* found = NULL;
Node* found = nullptr;
for (uint i = 0; i < outcnt(); i++) {
Node* use = raw_out(i);
if (use->is_CFG() && use != this) {
if (found != NULL) {
return NULL;
if (found != nullptr) {
return nullptr;
}
found = use;
}
@ -2870,12 +2870,12 @@ Node* Node::unique_ctrl_out_or_null() const {
// Return the unique control out. Asserts if none or more than one control out.
Node* Node::unique_ctrl_out() const {
Node* ctrl = unique_ctrl_out_or_null();
assert(ctrl != NULL, "control out is assumed to be unique");
assert(ctrl != nullptr, "control out is assumed to be unique");
return ctrl;
}
void Node::ensure_control_or_add_prec(Node* c) {
if (in(0) == NULL) {
if (in(0) == nullptr) {
set_req(0, c);
} else if (in(0) != c) {
add_prec(c);
@ -2886,7 +2886,7 @@ bool Node::is_dead_loop_safe() const {
if (is_Phi()) {
return true;
}
if (is_Proj() && in(0) == NULL) {
if (is_Proj() && in(0) == nullptr) {
return true;
}
if ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0) {
@ -2939,7 +2939,7 @@ void Node_List::dump_simple() const {
if( _nodes[i] ) {
tty->print(" %d", _nodes[i]->_idx);
} else {
tty->print(" NULL");
tty->print(" null");
}
}
#endif
@ -2965,7 +2965,7 @@ void Unique_Node_List::remove(Node* n) {
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
for (uint i = 0; i < size(); ++i) {
Node *n = at(i);
assert( n != NULL, "Did not expect null entries in worklist");
assert( n != nullptr, "Did not expect null entries in worklist");
if (!useful.test(n->_idx)) {
_in_worklist.remove(n->_idx);
map(i, Node_List::pop());
@ -2993,7 +2993,7 @@ Node* Node_Stack::find(uint idx) const {
return node_at(i);
}
}
return NULL;
return nullptr;
}
//=============================================================================

View File

@ -254,7 +254,7 @@ public:
// Create a new Node with given input edges.
// This version requires use of the "edge-count" new.
// E.g. new (C,3) FooNode( C, NULL, left, right );
// E.g. new (C,3) FooNode( C, nullptr, left, right );
Node( Node *n0 );
Node( Node *n0, Node *n1 );
Node( Node *n0, Node *n1, Node *n2 );
@ -270,10 +270,10 @@ public:
// Clone a Node, immediately supplying one or two new edges.
// The first and second arguments, if non-null, replace in(1) and in(2),
// respectively.
Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const {
Node* nn = clone();
if (in1 != NULL) nn->set_req(1, in1);
if (in2 != NULL) nn->set_req(2, in2);
if (in1 != nullptr) nn->set_req(1, in1);
if (in2 != nullptr) nn->set_req(2, in2);
return nn;
}
@ -292,10 +292,10 @@ protected:
Node **_out; // Array of def-use references to Nodes
// Input edges are split into two categories. Required edges are required
// for semantic correctness; order is important and NULLs are allowed.
// for semantic correctness; order is important and nulls are allowed.
// Precedence edges are used to help determine execution order and are
// added, e.g., for scheduling purposes. They are unordered and not
// duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
// duplicated; they have no embedded nulls. Edges from 0 to _cnt-1
// are required, from _cnt to _max-1 are precedence edges.
node_idx_t _cnt; // Total number of required Node inputs.
@ -390,8 +390,8 @@ protected:
// Reference to the i'th input Node. Error if out of bounds.
Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
// Reference to the i'th input Node. NULL if out of bounds.
Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
// Reference to the i'th input Node. null if out of bounds.
Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); }
// Reference to the i'th output Node. Error if out of bounds.
// Use this accessor sparingly. We are trying to use iterators instead.
Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
@ -434,9 +434,9 @@ protected:
assert( !VerifyHashTableKeys || _hash_lock == 0,
"remove node from hash table before modifying it");
Node** p = &_in[i]; // cache this._in, across the del_out call
if (*p != NULL) (*p)->del_out((Node *)this);
if (*p != nullptr) (*p)->del_out((Node *)this);
(*p) = n;
if (n != NULL) n->add_out((Node *)this);
if (n != nullptr) n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
}
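
A compact model of the bookkeeping set_req performs: every def-use edge is mirrored, so swapping an input must also fix up both defs' out-lists. ToyNode uses std::vector where HotSpot manages _out by hand; all names here are illustrative.

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Toy version of set_req(): changing an input updates the old and new
// defs' mirrored out-lists so the def-use edges stay reciprocal.
struct ToyNode {
    std::vector<ToyNode*> in;    // def edges (required inputs)
    std::vector<ToyNode*> out;   // mirrored use edges

    explicit ToyNode(size_t req) : in(req, nullptr) {}

    void set_req(size_t i, ToyNode* n) {
        ToyNode* old = in[i];
        if (old != nullptr) {    // drop reciprocal edge from the old def
            auto it = std::find(old->out.begin(), old->out.end(), this);
            assert(it != old->out.end());
            old->out.erase(it);
        }
        in[i] = n;
        if (n != nullptr) n->out.push_back(this);  // add reciprocal edge
    }
};

int main() {
    ToyNode def1(0), def2(0), use(1);
    use.set_req(0, &def1);
    use.set_req(0, &def2);       // def1's out-list shrinks, def2's grows
    assert(def1.out.empty() && def2.out.size() == 1);
    return 0;
}
```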
// Light version of set_req() to init inputs after node creation.
@ -446,9 +446,9 @@ protected:
assert( i < _cnt, "oob");
assert( !VerifyHashTableKeys || _hash_lock == 0,
"remove node from hash table before modifying it");
assert( _in[i] == NULL, "sanity");
assert( _in[i] == nullptr, "sanity");
_in[i] = n;
if (n != NULL) n->add_out((Node *)this);
if (n != nullptr) n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
}
// Find first occurrence of n among my edges:
@ -456,22 +456,22 @@ protected:
int find_prec_edge(Node* n) {
for (uint i = req(); i < len(); i++) {
if (_in[i] == n) return i;
if (_in[i] == NULL) {
DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
if (_in[i] == nullptr) {
DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); )
break;
}
}
return -1;
}
int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL);
int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr);
int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn);
// NULL out all inputs to eliminate incoming Def-Use edges.
// null out all inputs to eliminate incoming Def-Use edges.
void disconnect_inputs(Compile* C);
// Quickly, return true if and only if I am Compile::current()->top().
bool is_top() const {
assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
return (_out == NULL);
assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), "");
return (_out == nullptr);
}
// Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
void setup_is_top();
@ -519,14 +519,14 @@ private:
void close_prec_gap_at(uint gap) {
assert(_cnt <= gap && gap < _max, "no valid prec edge");
uint i = gap;
Node *last = NULL;
Node *last = nullptr;
for (; i < _max-1; ++i) {
Node *next = _in[i+1];
if (next == NULL) break;
if (next == nullptr) break;
last = next;
}
_in[gap] = last; // Move last slot to empty one.
_in[i] = NULL; // NULL out last slot.
_in[gap] = last; // Move last slot to empty one.
_in[i] = nullptr; // null out last slot.
}
public:
@ -553,11 +553,11 @@ public:
assert(i >= _cnt, "not a precedence edge");
// Avoid spec violation: duplicated prec edge.
if (_in[i] == n) return;
if (n == NULL || find_prec_edge(n) != -1) {
if (n == nullptr || find_prec_edge(n) != -1) {
rm_prec(i);
return;
}
if (_in[i] != NULL) _in[i]->del_out((Node *)this);
if (_in[i] != nullptr) _in[i]->del_out((Node *)this);
_in[i] = n;
n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
@ -582,7 +582,7 @@ public:
// Iterators over input Nodes for a Node X are written as:
// for( i = 0; i < X.req(); i++ ) ... X[i] ...
// NOTE: Required edges can contain embedded NULL pointers.
// NOTE: Required edges can contain embedded null pointers.
//----------------- Other Node Properties
@ -837,11 +837,11 @@ public:
return ((_class_id & ClassMask_##type) == Class_##type); \
} \
type##Node *as_##type() const { \
assert(is_##type(), "invalid node class: %s", Name()); \
assert(is_##type(), "invalid node class: %s", Name()); \
return (type##Node*)this; \
} \
type##Node* isa_##type() const { \
return (is_##type()) ? as_##type() : NULL; \
return (is_##type()) ? as_##type() : nullptr; \
}
DEFINE_CLASS_QUERY(AbstractLock)
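
A self-contained imitation of the query-macro pattern above, under simplifying assumptions: plain equality on a toy class-id field where the real macro tests `_class_id & ClassMask_##type`, and only two stamped-out classes. It shows how one macro yields the is_T test, the asserted as_T cast, and the cast-or-null isa_T.

```cpp
#include <cassert>

// Toy DEFINE_CLASS_QUERY: a cheap class-id check replaces RTTI for the
// is_/as_/isa_ accessor triple. Not the real C2 hierarchy.
struct AddNode;
struct MulNode;

struct Node {
    enum ClassId { Class_Node, Class_Add, Class_Mul };
    ClassId _class_id = Class_Node;

#define DEFINE_CLASS_QUERY(type)                                        \
    bool is_##type() const { return _class_id == Class_##type; }        \
    type##Node* as_##type() const {                                     \
        assert(is_##type());                                            \
        return (type##Node*)this;                                       \
    }                                                                   \
    type##Node* isa_##type() const {                                    \
        return is_##type() ? as_##type() : nullptr;                     \
    }

    DEFINE_CLASS_QUERY(Add)
    DEFINE_CLASS_QUERY(Mul)
#undef DEFINE_CLASS_QUERY
};

struct AddNode : Node { AddNode() { _class_id = Class_Add; } };
struct MulNode : Node { MulNode() { _class_id = Class_Mul; } };

int main() {
    AddNode add;
    Node* n = &add;
    assert(n->is_Add() && n->isa_Mul() == nullptr);
    AddNode* a = n->as_Add();   // asserted downcast, no RTTI needed
    (void)a;
    return 0;
}
```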
@ -1000,7 +1000,7 @@ public:
// The node is a "macro" node which needs to be expanded before matching
bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
// The node is expensive: the best control is set during loop opts
bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; }
// An arithmetic node which accumulates a data in a loop.
// It must have the loop's phi as input and provide a def to the phi.
@ -1026,10 +1026,10 @@ public:
void raise_bottom_type(const Type* new_type);
// Get the address type with which this node uses and/or defs memory,
// or NULL if none. The address type is conservatively wide.
// or null if none. The address type is conservatively wide.
// Returns non-null for calls, membars, loads, stores, etc.
// Returns TypePtr::BOTTOM if the node touches memory "broadly".
virtual const class TypePtr *adr_type() const { return NULL; }
virtual const class TypePtr *adr_type() const { return nullptr; }
// Return an existing node which computes the same function as this node.
// The optimistic combined algorithm requires this to return a Node which
@ -1087,7 +1087,7 @@ public:
bool is_cloop_ind_var() const;
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; otherwise return NULL.
// be found; otherwise return null.
Node* find_similar(int opc);
// Return the unique control out if only one. Null if none or more than one.
@ -1117,7 +1117,7 @@ public:
// Should we clone rather than spill this instruction?
bool rematerialize() const;
// Return JVM State Object if this Node carries debug info, or NULL otherwise
// Return JVM State Object if this Node carries debug info, or null otherwise
virtual JVMState* jvms() const;
// Print as assembly
@ -1133,12 +1133,12 @@ public:
// return value_if_unknown.
jint find_int_con(jint value_if_unknown) const {
const TypeInt* t = find_int_type();
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
}
// Return the constant, knowing it is an integer constant already
jint get_int() const {
const TypeInt* t = find_int_type();
guarantee(t != NULL, "must be con");
guarantee(t != nullptr, "must be con");
return t->get_con();
}
// Here's where the work is done. Can produce non-constant int types too.
@ -1148,23 +1148,23 @@ public:
// Same thing for long (and intptr_t, via type.hpp):
jlong get_long() const {
const TypeLong* t = find_long_type();
guarantee(t != NULL, "must be con");
guarantee(t != nullptr, "must be con");
return t->get_con();
}
jlong find_long_con(jint value_if_unknown) const {
const TypeLong* t = find_long_type();
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
}
const TypeLong* find_long_type() const;
jlong get_integer_as_long(BasicType bt) const {
const TypeInteger* t = find_integer_type(bt);
guarantee(t != NULL && t->is_con(), "must be con");
guarantee(t != nullptr && t->is_con(), "must be con");
return t->get_con_as_long(bt);
}
jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const {
const TypeInteger* t = find_integer_type(bt);
if (t == NULL || !t->is_con()) return value_if_unknown;
if (t == nullptr || !t->is_con()) return value_if_unknown;
return t->get_con_as_long(bt);
}
const TypePtr* get_ptr_type() const;
@ -1258,7 +1258,7 @@ public:
};
inline bool not_a_node(const Node* n) {
if (n == NULL) return true;
if (n == nullptr) return true;
if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
if (*(address*)n == badAddress) return true; // kill by Node::destruct
return false;
@ -1518,7 +1518,7 @@ class SimpleDUIterator : public StackObj {
//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Abstractly provides an infinite array of Node*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public AnyObj {
@ -1535,15 +1535,15 @@ public:
}
Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
{ return (i<_max) ? _nodes[i] : (Node*)NULL; }
Node *operator[] ( uint i ) const // Lookup, or null for not mapped
{ return (i<_max) ? _nodes[i] : (Node*)nullptr; }
Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
Node** adr() { return _nodes; }
// Extend the mapping: index i maps to Node *n.
void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
void insert( uint i, Node *n );
void remove( uint i ); // Remove, preserving order
// Clear all entries in _nodes to NULL but keep storage
// Clear all entries in _nodes to null but keep storage
void clear() {
Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
}
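
A minimal sketch of the contract described above ("infinite" array, unmapped reads as null, doubling growth), assuming plain new[]/delete[] where the real class uses arena allocation; ToyNodeArray is not the HotSpot type.

```cpp
#include <cstdio>
#include <cstring>

// Toy Node_Array: reads of unmapped indices yield null; map() doubles
// the zero-initialized storage on demand.
struct ToyNodeArray {
    unsigned _max;
    void**   _nodes;

    explicit ToyNodeArray(unsigned initial = 4)
        : _max(initial), _nodes(new void*[initial]()) {}
    ~ToyNodeArray() { delete[] _nodes; }

    void* operator[](unsigned i) const {        // lookup, or null if unmapped
        return (i < _max) ? _nodes[i] : nullptr;
    }

    void map(unsigned i, void* n) {             // grow, then store
        if (i >= _max) grow(i);
        _nodes[i] = n;
    }

private:
    void grow(unsigned i) {                     // classic doubling trick
        unsigned new_max = _max;
        while (i >= new_max) new_max *= 2;
        void** bigger = new void*[new_max]();   // zero-initialized
        std::memcpy(bigger, _nodes, _max * sizeof(void*));
        delete[] _nodes;
        _nodes = bigger;
        _max = new_max;
    }
};

int main() {
    int x;
    ToyNodeArray a;
    std::printf("unmapped: %p\n", a[100]);      // null, no growth needed
    a.map(100, &x);                             // grows 4 -> 128
    std::printf("mapped:   %p\n", a[100]);
    return 0;
}
```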
@ -1646,7 +1646,7 @@ public:
void add(Node* node) {
if (not_a_node(node)) {
return; // Gracefully handle NULL, -1, 0xabababab, etc.
return; // Gracefully handle null, -1, 0xabababab, etc.
}
if (_visited_set[node] == nullptr) {
_visited_set.Insert(node, node);
@ -1756,7 +1756,7 @@ class Node_Notes {
JVMState* _jvms;
public:
Node_Notes(JVMState* jvms = NULL) {
Node_Notes(JVMState* jvms = nullptr) {
_jvms = jvms;
}
@ -1765,12 +1765,12 @@ public:
// True if there is nothing here.
bool is_clear() {
return (_jvms == NULL);
return (_jvms == nullptr);
}
// Make there be nothing here.
void clear() {
_jvms = NULL;
_jvms = nullptr;
}
// Make a new, clean node notes.
@ -1789,8 +1789,8 @@ public:
// Absorb any information from source.
bool update_from(Node_Notes* source) {
bool changed = false;
if (source != NULL) {
if (source->jvms() != NULL) {
if (source != nullptr) {
if (source->jvms() != nullptr) {
set_jvms(source->jvms());
changed = true;
}
@ -1805,22 +1805,22 @@ Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
int idx, bool can_grow) {
assert(idx >= 0, "oob");
int block_idx = (idx >> _log2_node_notes_block_size);
int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
int grow_by = (block_idx - (arr == nullptr? 0: arr->length()));
if (grow_by >= 0) {
if (!can_grow) return NULL;
if (!can_grow) return nullptr;
grow_node_notes(arr, grow_by + 1);
}
if (arr == NULL) return NULL;
if (arr == nullptr) return nullptr;
// (Every element of arr is a sub-array of length _node_notes_block_size.)
return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
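
The addressing trick in locate_node_notes, isolated: a flat index splits into a block number (high bits) and an offset within a power-of-two block (low bits). The constants below are illustrative, not the real _log2_node_notes_block_size.

```cpp
#include <cassert>
#include <cstdio>

// Two-level index arithmetic: block size must be a power of two so that
// shift and mask recover the (block, offset) pair exactly.
const int log2_block_size = 8;
const int block_size      = 1 << log2_block_size;   // 256 entries per block

void split(int idx, int* block_idx, int* offset) {
    assert(idx >= 0);
    *block_idx = idx >> log2_block_size;     // which block
    *offset    = idx & (block_size - 1);     // position inside the block
}

int main() {
    int b, o;
    split(1000, &b, &o);
    std::printf("idx 1000 -> block %d, offset %d\n", b, o);  // block 3, offset 232
    assert(b * block_size + o == 1000);
    return 0;
}
```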
inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
if (value == NULL || value->is_clear())
if (value == nullptr || value->is_clear())
return false; // nothing to write => write nothing
Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
assert(loc != NULL, "");
assert(loc != nullptr, "");
return loc->update_from(value);
}
@ -1835,13 +1835,13 @@ protected:
const Type* const _type;
public:
void set_type(const Type* t) {
assert(t != NULL, "sanity");
assert(t != nullptr, "sanity");
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
*(const Type**)&_type = t; // cast away const-ness
// If this node is in the hash table, make sure it doesn't need a rehash.
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
}
const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
const Type* type() const { assert(_type != nullptr, "sanity"); return _type; };
TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
init_class_id(Class_Type);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -76,7 +76,7 @@ Node *ProfileBooleanNode::Ideal(PhaseGVN *phase, bool can_reshape) {
_delay_removal = false;
return this;
} else {
return NULL;
return nullptr;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@ class Opaque1Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual bool cmp( const Node &n ) const;
public:
Opaque1Node(Compile* C, Node *n) : Node(NULL, n) {
Opaque1Node(Compile* C, Node *n) : Node(nullptr, n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
init_class_id(Class_Opaque1);
@ -44,13 +44,13 @@ class Opaque1Node : public Node {
}
// Special version for the pre-loop to hold the original loop limit
// which is consumed by range check elimination.
Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(NULL, n, orig_limit) {
Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(nullptr, n, orig_limit) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
init_class_id(Class_Opaque1);
C->add_macro_node(this);
}
Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
Node* original_loop_limit() { return req()==3 ? in(2) : nullptr; }
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node* Identity(PhaseGVN* phase);
@ -115,7 +115,7 @@ class Opaque3Node : public Node {
// GraphKit::must_be_not_null().
class Opaque4Node : public Node {
public:
Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(NULL, tst, final_tst) {
Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(nullptr, tst, final_tst) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}

View File

@ -225,29 +225,29 @@ PhaseOutput::PhaseOutput()
_handler_table(),
_inc_table(),
_stub_list(),
_oop_map_set(NULL),
_scratch_buffer_blob(NULL),
_scratch_locs_memory(NULL),
_oop_map_set(nullptr),
_scratch_buffer_blob(nullptr),
_scratch_locs_memory(nullptr),
_scratch_const_size(-1),
_in_scratch_emit_size(false),
_frame_slots(0),
_code_offsets(),
_node_bundling_limit(0),
_node_bundling_base(NULL),
_node_bundling_base(nullptr),
_orig_pc_slot(0),
_orig_pc_slot_offset_in_bytes(0),
_buf_sizes(),
_block(NULL),
_block(nullptr),
_index(0) {
C->set_output(this);
if (C->stub_name() == NULL) {
if (C->stub_name() == nullptr) {
_orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
}
}
PhaseOutput::~PhaseOutput() {
C->set_output(NULL);
if (_scratch_buffer_blob != NULL) {
C->set_output(nullptr);
if (_scratch_buffer_blob != nullptr) {
BufferBlob::free(_scratch_buffer_blob);
}
}
@ -348,7 +348,7 @@ void PhaseOutput::Output() {
// Complete sizing of codebuffer
CodeBuffer* cb = init_buffer();
if (cb == NULL || C->failing()) {
if (cb == nullptr || C->failing()) {
return;
}
@ -369,7 +369,7 @@ bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
// unexpected stack overflow (compiled method stack banging should
// guarantee it doesn't happen) so we always need the stack bang in
// a debug VM.
return (C->stub_function() == NULL &&
return (C->stub_function() == nullptr &&
(C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
DEBUG_ONLY(|| true)));
}
@ -379,7 +379,7 @@ bool PhaseOutput::need_register_stack_bang() const {
// This is only used on architectures which have split register
// and memory stacks (ie. IA64).
// Bang if the method is not a stub function and has java calls
return (C->stub_function() == NULL && C->has_java_calls());
return (C->stub_function() == nullptr && C->has_java_calls());
}
@ -585,8 +585,8 @@ void PhaseOutput::shorten_branches(uint* blk_starts) {
for (uint i = 0; i < nblocks; i++) {
Block* block = C->cfg()->get_block(i);
int idx = jmp_nidx[i];
MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
if (mach != NULL && mach->may_be_short_branch()) {
MachNode* mach = (idx == -1) ? nullptr: block->get_node(idx)->as_Mach();
if (mach != nullptr && mach->may_be_short_branch()) {
#ifdef ASSERT
assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
int j;
@ -650,7 +650,7 @@ void PhaseOutput::shorten_branches(uint* blk_starts) {
has_short_branch_candidate = true;
}
} // (mach->may_be_short_branch())
if (mach != NULL && (mach->may_be_short_branch() ||
if (mach != nullptr && (mach->may_be_short_branch() ||
mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
}
@ -716,12 +716,12 @@ PhaseOutput::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
}
}
// Otherwise..
return NULL;
return nullptr;
}
void PhaseOutput::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
ObjectValue* sv ) {
assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition");
assert(sv_for_node_id(objs, sv->id()) == nullptr, "Precondition");
objs->append(sv);
}
@ -750,7 +750,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
if (sv == NULL) {
if (sv == nullptr) {
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
@ -845,7 +845,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
ShouldNotReachHere(); // Caller should skip 2nd halves
break;
case Type::AnyPtr:
array->append(new ConstantOopWriteValue(NULL));
array->append(new ConstantOopWriteValue(nullptr));
break;
case Type::AryPtr:
case Type::InstPtr: // fall through
@ -853,7 +853,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
break;
case Type::NarrowOop:
if (t == TypeNarrowOop::NULL_PTR) {
array->append(new ConstantOopWriteValue(NULL));
array->append(new ConstantOopWriteValue(nullptr));
} else {
array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
}
@ -945,7 +945,7 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
// Add the safepoint in the DebugInfoRecorder
if( !mach->is_MachCall() ) {
mcall = NULL;
mcall = nullptr;
C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
} else {
mcall = mach->as_MachCall();
@ -968,7 +968,7 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
}
// Loop over the JVMState list to add scope information
// Do not skip safepoints with a NULL method, they need monitor info
// Do not skip safepoints with a null method, they need monitor info
JVMState* youngest_jvms = sfn->jvms();
int max_depth = youngest_jvms->depth();
@ -981,13 +981,13 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
for (int depth = 1; depth <= max_depth; depth++) {
JVMState* jvms = youngest_jvms->of_depth(depth);
int idx;
ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
// Safepoints that do not have method() set only provide oop-map and monitor info
// to support GC; these do not support deoptimization.
int num_locs = (method == NULL) ? 0 : jvms->loc_size();
int num_exps = (method == NULL) ? 0 : jvms->stk_size();
int num_locs = (method == nullptr) ? 0 : jvms->loc_size();
int num_exps = (method == nullptr) ? 0 : jvms->stk_size();
int num_mon = jvms->nof_monitors();
assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
assert(method == nullptr || jvms->bci() < 0 || num_locs == method->max_locals(),
"JVMS local count must match that of the method");
// Add Local and Expression Stack Information
@ -1022,12 +1022,12 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
Node* obj_node = sfn->monitor_obj(jvms, idx);
// Create ScopeValue for object
ScopeValue *scval = NULL;
ScopeValue *scval = nullptr;
if (obj_node->is_SafePointScalarObject()) {
SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
if (scval == NULL) {
if (scval == nullptr) {
const Type *t = spobj->bottom_type();
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
@ -1111,7 +1111,7 @@ class NonSafepointEmitter {
public:
NonSafepointEmitter(Compile* compile) {
this->C = compile;
_pending_jvms = NULL;
_pending_jvms = nullptr;
_pending_offset = 0;
}
@ -1119,19 +1119,19 @@ class NonSafepointEmitter {
if (!C->debug_info()->recording_non_safepoints()) return;
Node_Notes* nn = C->node_notes_at(n->_idx);
if (nn == NULL || nn->jvms() == NULL) return;
if (_pending_jvms != NULL &&
if (nn == nullptr || nn->jvms() == nullptr) return;
if (_pending_jvms != nullptr &&
_pending_jvms->same_calls_as(nn->jvms())) {
// Repeated JVMS? Stretch it up here.
_pending_offset = pc_offset;
} else {
if (_pending_jvms != NULL &&
if (_pending_jvms != nullptr &&
_pending_offset < pc_offset) {
emit_non_safepoint();
}
_pending_jvms = NULL;
_pending_jvms = nullptr;
if (pc_offset > C->debug_info()->last_pc_offset()) {
// This is the only way _pending_jvms can become non-NULL:
// This is the only way _pending_jvms can become non-null:
_pending_jvms = nn->jvms();
_pending_offset = pc_offset;
}
@ -1140,19 +1140,19 @@ class NonSafepointEmitter {
// Stay out of the way of real safepoints:
void observe_safepoint(JVMState* jvms, int pc_offset) {
if (_pending_jvms != NULL &&
if (_pending_jvms != nullptr &&
!_pending_jvms->same_calls_as(jvms) &&
_pending_offset < pc_offset) {
emit_non_safepoint();
}
_pending_jvms = NULL;
_pending_jvms = nullptr;
}
void flush_at_end() {
if (_pending_jvms != NULL) {
if (_pending_jvms != nullptr) {
emit_non_safepoint();
}
_pending_jvms = NULL;
_pending_jvms = nullptr;
}
};
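
The emitter above is essentially a run-length coalescer: repeated JVM states stretch one pending record, and a state change or end-of-stream flushes it. A hedged stand-alone sketch, with int standing in for the JVMState pointer and stdout for the debug-info recorder:

```cpp
#include <cstdio>

// Toy NonSafepointEmitter: identical consecutive states stretch a single
// pending record; a change or end-of-stream flushes it.
class CoalescingEmitter {
    int _pending_state  = -1;   // -1 means "nothing pending"
    int _pending_offset = 0;

    void emit_pending() {
        std::printf("record: state=%d last_offset=%d\n",
                    _pending_state, _pending_offset);
    }

public:
    void observe(int state, int offset) {
        if (_pending_state == state) {
            _pending_offset = offset;        // repeated state? stretch it
        } else {
            if (_pending_state != -1) emit_pending();
            _pending_state  = state;         // start a new pending record
            _pending_offset = offset;
        }
    }
    void flush_at_end() {
        if (_pending_state != -1) emit_pending();
        _pending_state = -1;
    }
};

int main() {
    CoalescingEmitter e;
    e.observe(7, 4);  e.observe(7, 8);  e.observe(7, 12);  // one stretched record
    e.observe(9, 16);                                      // flushes state 7
    e.flush_at_end();                                      // flushes state 9
    return 0;
}
```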
@ -1161,7 +1161,7 @@ void NonSafepointEmitter::emit_non_safepoint() {
int pc_offset = _pending_offset;
// Clear it now:
_pending_jvms = NULL;
_pending_jvms = nullptr;
DebugInformationRecorder* debug_info = C->debug_info();
assert(debug_info->recording_non_safepoints(), "sanity");
@ -1172,7 +1172,7 @@ void NonSafepointEmitter::emit_non_safepoint() {
// Visit scopes from oldest to youngest.
for (int depth = 1; depth <= max_depth; depth++) {
JVMState* jvms = youngest_jvms->of_depth(depth);
ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
methodHandle null_mh;
debug_info->describe_scope(pc_offset, null_mh, method, jvms->bci(), jvms->should_reexecute());
@ -1270,9 +1270,9 @@ CodeBuffer* PhaseOutput::init_buffer() {
cb->initialize(total_req, _buf_sizes._reloc);
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return NULL;
return nullptr;
}
// Configure the code buffer.
cb->initialize_consts_size(const_req);
@ -1327,13 +1327,13 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Create an array of unused labels, one for each basic block, if printing is enabled
#if defined(SUPPORT_OPTO_ASSEMBLY)
int* node_offsets = NULL;
int* node_offsets = nullptr;
uint node_offset_limit = C->unique();
if (C->print_assembly()) {
node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
}
if (node_offsets != NULL) {
if (node_offsets != nullptr) {
// We need to initialize. Unused array elements may contain garbage and mess up PrintOptoAssembly.
memset(node_offsets, 0, node_offset_limit*sizeof(int));
}
@ -1356,7 +1356,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
// Now fill in the code buffer
Node* delay_slot = NULL;
Node* delay_slot = nullptr;
for (uint i = 0; i < nblocks; i++) {
Block* block = C->cfg()->get_block(i);
_block = block;
@ -1397,7 +1397,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// See if delay slots are supported
if (valid_bundle_info(n) && node_bundling(n)->used_in_unconditional_delay()) {
assert(delay_slot == NULL, "no use of delay slot node");
assert(delay_slot == nullptr, "no use of delay slot node");
assert(n->size(C->regalloc()) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
delay_slot = n;
@ -1447,7 +1447,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
C->cfg()->map_node_to_block(nop, block);
// Ensure enough space.
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
@ -1476,7 +1476,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (!is_mcall) {
MachSafePointNode *sfn = mach->as_MachSafePoint();
// !!!!! Stubs only need an oopmap right now, so bail out
if (sfn->jvms()->method() == NULL) {
if (sfn->jvms()->method() == nullptr) {
// Write the oopmap directly to the code blob??!!
continue;
}
@ -1503,7 +1503,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
bool delay_slot_is_used = valid_bundle_info(n) &&
C->output()->node_bundling(n)->use_unconditional_delay();
if (!delay_slot_is_used && mach->may_be_short_branch()) {
assert(delay_slot == NULL, "not expecting delay slot node");
assert(delay_slot == nullptr, "not expecting delay slot node");
int br_size = n->size(C->regalloc());
int offset = blk_starts[block_num] - current_offset;
if (block_num >= i) {
@ -1568,7 +1568,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int count = 0;
for (uint prec = mach->req(); prec < mach->len(); prec++) {
Node *oop_store = mach->in(prec); // Precedence edge
if (oop_store == NULL) continue;
if (oop_store == nullptr) continue;
count++;
uint i4;
for (i4 = 0; i4 < last_inst; ++i4) {
@ -1599,14 +1599,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Verify that there is sufficient space remaining
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
// Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
node_offsets[n->_idx] = cb->insts_size();
}
#endif
@ -1662,14 +1662,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// See if this instruction has a delay slot
if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
guarantee(delay_slot != NULL, "expecting delay slot node");
guarantee(delay_slot != nullptr, "expecting delay slot node");
// Back up 1 instruction
cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
// Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
if ((node_offsets != NULL) && (delay_slot->_idx < node_offset_limit)) {
if ((node_offsets != nullptr) && (delay_slot->_idx < node_offset_limit)) {
node_offsets[delay_slot->_idx] = cb->insts_size();
}
#endif
@ -1678,9 +1678,9 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (delay_slot->is_MachSafePoint()) {
MachNode *mach = delay_slot->as_Mach();
// !!!!! Stubs only need an oopmap right now, so bail out
if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == nullptr) {
// Write the oopmap directly to the code blob??!!
delay_slot = NULL;
delay_slot = nullptr;
continue;
}
@ -1695,7 +1695,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
delay_slot->emit(*cb, C->regalloc());
// Don't reuse it
delay_slot = NULL;
delay_slot = nullptr;
}
} // End for all instructions in block
@ -1783,7 +1783,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
@ -1805,7 +1805,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// make sure the end tag is coherent, and that xmlStream->pop_tag is done thread safe.
ResourceMark rm;
stringStream method_metadata_str;
if (C->method() != NULL) {
if (C->method() != nullptr) {
C->method()->print_metadata(&method_metadata_str);
}
stringStream dump_asm_str;
@ -1816,21 +1816,21 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// This output goes directly to the tty, not the compiler log.
// To enable tools to match it up with the compilation activity,
// be sure to tag this tty output with the compile ID.
if (xtty != NULL) {
if (xtty != nullptr) {
xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
C->is_osr_compilation() ? " compile_kind='osr'" : "");
}
if (C->method() != NULL) {
if (C->method() != nullptr) {
tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
tty->print_raw(method_metadata_str.freeze());
} else if (C->stub_name() != NULL) {
} else if (C->stub_name() != nullptr) {
tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
}
tty->cr();
tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
tty->print_raw(dump_asm_str.freeze());
tty->print_cr("--------------------------------------------------------------------------------");
if (xtty != NULL) {
if (xtty != nullptr) {
xtty->tail("opto_assembly");
}
}
@ -1844,7 +1844,7 @@ void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_s
uint inct_cnt = 0;
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
Block* block = C->cfg()->get_block(i);
Node *n = NULL;
Node *n = nullptr;
int j;
// Find the branch; ignore trailing NOPs.
@ -1905,7 +1905,7 @@ void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_s
// Set the offset of the return from the call
assert(handler_bcis.find(-1) != -1, "must have default handler");
_handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
_handler_table.add_subtable(call_return, &handler_bcis, nullptr, &handler_pcos);
continue;
}
@ -1943,7 +1943,7 @@ Scheduling::Scheduling(Arena *arena, Compile &compile)
_available(arena),
_reg_node(arena),
_pinch_free_list(arena),
_next_node(NULL),
_next_node(nullptr),
_bundle_instr_count(0),
_bundle_cycle_number(0),
_bundle_use(0, 0, resource_count, &_bundle_use_elements[0])
@ -2155,9 +2155,9 @@ Node * Scheduling::ChooseNodeToBundle() {
#ifndef PRODUCT
if (_cfg->C->trace_opto_output())
tty->print("# ChooseNodeToBundle: NULL\n");
tty->print("# ChooseNodeToBundle: null\n");
#endif
return (NULL);
return (nullptr);
}
// Fast path, if only 1 instruction in the bundle
@ -2506,7 +2506,7 @@ void Scheduling::ComputeUseCount(const Block *bb) {
_scheduled.clear();
// No delay slot specified
_unconditional_delay_slot = NULL;
_unconditional_delay_slot = nullptr;
#ifdef ASSERT
for( uint i=0; i < bb->number_of_nodes(); i++ )
@ -2568,7 +2568,7 @@ void Scheduling::DoScheduling() {
tty->print("# -> DoScheduling\n");
#endif
Block *succ_bb = NULL;
Block *succ_bb = nullptr;
Block *bb;
Compile* C = Compile::current();
@ -2667,7 +2667,7 @@ void Scheduling::DoScheduling() {
// Schedule the remaining instructions in the block
while ( _available.size() > 0 ) {
Node *n = ChooseNodeToBundle();
guarantee(n != NULL, "no nodes available");
guarantee(n != nullptr, "no nodes available");
AddNodeToBundle(n,bb);
}
@ -2742,7 +2742,7 @@ void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
prior_use->dump();
assert(edge_from_to(prior_use,n), "%s", msg);
}
_reg_node.map(def,NULL); // Kill live USEs
_reg_node.map(def,nullptr); // Kill live USEs
}
}
@ -2819,7 +2819,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
}
Node *pinch = _reg_node[def_reg]; // Get pinch point
if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
if ((pinch == nullptr) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
is_def ) { // Check for a true def (not a kill)
_reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
return;
@ -2829,7 +2829,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
debug_only( def = (Node*)((intptr_t)0xdeadbeef); )
// After some number of kills there _may_ be a later def
Node *later_def = NULL;
Node *later_def = nullptr;
Compile* C = Compile::current();
@ -2851,9 +2851,9 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
_reg_node.map(def_reg,pinch); // Record pinch-point
//regalloc()->set_bad(pinch->_idx); // Already initialized this way.
if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
pinch->init_req(0, C->top()); // set not NULL for the next call
pinch->init_req(0, C->top()); // set not null for the next call
add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
later_def = NULL; // and no later def
later_def = nullptr; // and no later def
}
pinch->set_req(0,later_def); // Hook later def so we can find it
} else { // Else have valid pinch point
@ -2872,7 +2872,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
_regalloc->get_reg_second(uses->in(i)) == def_reg ) {
// Yes, found a use/kill pinch-point
pinch->set_req(0,NULL); //
pinch->set_req(0,nullptr); //
pinch->replace_by(kill); // Move anti-dep edges up
pinch = kill;
_reg_node.map(def_reg,pinch);
@ -2890,7 +2890,7 @@ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
return;
Node *pinch = _reg_node[use_reg]; // Get pinch point
// Check for no later def_reg/kill in block
if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
if ((pinch != nullptr) && _cfg->get_block_for_node(pinch) == b &&
// Use has to be block-local as well
_cfg->get_block_for_node(use) == b) {
if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
@ -2942,14 +2942,14 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
// put an edge from the pinch point to the USE.
// To be expedient, the _reg_node array is pre-allocated for the whole
// compilation. _reg_node is lazily initialized; it either contains a NULL,
// compilation. _reg_node is lazily initialized; it either contains a null,
// or a valid def/kill/pinch-point, or a leftover node from some prior
// block. A leftover node from some prior block is treated like a NULL (no
// block. A leftover node from some prior block is treated like a null (no
// prior def, so no anti-dependence needed). Valid def is distinguished by
// it being in the current block.
bool fat_proj_seen = false;
uint last_safept = _bb_end-1;
Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : nullptr;
Node* last_safept_node = end_node;
for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
Node *n = b->get_node(i);
@ -3067,12 +3067,12 @@ void Scheduling::garbage_collect_pinch_nodes() {
int trace_cnt = 0;
for (uint k = 0; k < _reg_node.Size(); k++) {
Node* pinch = _reg_node[k];
if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
if ((pinch != nullptr) && pinch->Opcode() == Op_Node &&
// no precedence input edges
(pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
(pinch->req() == pinch->len() || pinch->in(pinch->req()) == nullptr) ) {
cleanup_pinch(pinch);
_pinch_free_list.push(pinch);
_reg_node.map(k, NULL);
_reg_node.map(k, nullptr);
#ifndef PRODUCT
if (_cfg->C->trace_opto_output()) {
trace_cnt++;
@ -3107,7 +3107,7 @@ void Scheduling::cleanup_pinch( Node *pinch ) {
i -= uses_found; // we deleted 1 or more copies of this edge
}
// May have a later_def entry
pinch->set_req(0, NULL);
pinch->set_req(0, nullptr);
}
#ifndef PRODUCT
@ -3160,10 +3160,10 @@ void PhaseOutput::init_scratch_buffer_blob(int const_size) {
// constant section is big enough, use it. Otherwise free the
// current and allocate a new one.
BufferBlob* blob = scratch_buffer_blob();
if ((blob != NULL) && (const_size <= _scratch_const_size)) {
if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
// Use the current blob.
} else {
if (blob != NULL) {
if (blob != nullptr) {
BufferBlob::free(blob);
}
@ -3174,7 +3174,7 @@ void PhaseOutput::init_scratch_buffer_blob(int const_size) {
// Record the buffer blob for next time.
set_scratch_buffer_blob(blob);
// Have we run out of code space?
if (scratch_buffer_blob() == NULL) {
if (scratch_buffer_blob() == nullptr) {
// Let CompilerBroker disable further compilations.
C->record_failure("Not enough space for scratch buffer in CodeCache");
return;
@ -3204,7 +3204,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) {
// The allocation of the scratch buffer blob is particularly
// expensive, since it has to grab the code cache lock.
BufferBlob* blob = this->scratch_buffer_blob();
assert(blob != NULL, "Initialize BufferBlob at start");
assert(blob != nullptr, "Initialize BufferBlob at start");
assert(blob->size() > MAX_inst_size, "sanity");
relocInfo* locs_buf = scratch_locs_memory();
address blob_begin = blob->content_begin();
@ -3213,7 +3213,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) {
CodeBuffer buf(blob_begin, blob_end - blob_begin);
buf.initialize_consts_size(_scratch_const_size);
buf.initialize_stubs_size(MAX_stubs_size);
assert(locs_buf != NULL, "sanity");
assert(locs_buf != nullptr, "sanity");
int lsize = MAX_locs_size / 3;
buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
@ -3226,7 +3226,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) {
// Do the emission.
Label fakeL; // Fake label for branch instructions.
Label* saveL = NULL;
Label* saveL = nullptr;
uint save_bnum = 0;
bool is_branch = n->is_MachBranch();
if (is_branch) {
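scratch_emit_size measures an instruction by actually emitting it into a throwaway buffer and reading back how many bytes came out; branch instructions get the fake local label above so emission never touches a real target. A simplified illustration of that measure-by-emitting trick, with a hypothetical emitter interface:

#include <cstdint>
#include <cstddef>

struct Emitter {
  uint8_t buf[64];
  size_t pos = 0;
  void emit_byte(uint8_t b) { buf[pos++] = b; }
};

// Run the emission callback against a scratch emitter and report how many
// bytes it produced. All side effects land in the scratch buffer only.
template <typename EmitFn>
size_t measured_size(EmitFn emit) {
  Emitter scratch;
  emit(scratch);
  return scratch.pos;
}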
@ -3252,7 +3252,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) {
void PhaseOutput::install() {
if (!C->should_install_code()) {
return;
} else if (C->stub_function() != NULL) {
} else if (C->stub_function() != nullptr) {
install_stub(C->stub_name());
} else {
install_code(C->method(),
@ -3304,14 +3304,14 @@ void PhaseOutput::install_code(ciMethod* target,
0,
C->rtm_state());
if (C->log() != NULL) { // Print code cache state into compiler log
if (C->log() != nullptr) { // Print code cache state into compiler log
C->log()->code_cache_state();
}
}
}
void PhaseOutput::install_stub(const char* stub_name) {
// Entry point will be accessed using stub_entry_point();
if (code_buffer() == NULL) {
if (code_buffer() == nullptr) {
Matcher::soft_match_failure();
} else {
if (PrintAssembly && (WizardMode || Verbose))
@ -3329,7 +3329,7 @@ void PhaseOutput::install_stub(const char* stub_name) {
frame_size_in_words(),
oop_map_set(),
false);
assert(rs != NULL && rs->is_runtime_stub(), "sanity check");
assert(rs != nullptr && rs->is_runtime_stub(), "sanity check");
C->set_stub_entry_point(rs->entry_point());
}
@ -3372,7 +3372,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
int pc_digits = 3; // #chars required for pc
int sb_chars = 3; // #chars for "start bundle" indicator
int tab_size = 8;
if (pcs != NULL) {
if (pcs != nullptr) {
int max_pc = 0;
for (uint i = 0; i < pc_limit; i++) {
max_pc = (max_pc < pcs[i]) ? pcs[i] : max_pc;
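The loop above only sizes the pc column: find the largest pc, then ask how many hex digits it needs (the %*.*x format below prints in hex), with a floor of three. Equivalent logic in isolation:

// Number of hex digits needed to print values up to max_pc, minimum 3.
int hex_digits_for(unsigned max_pc) {
  int digits = 3;
  while (digits < 8 && max_pc >= (1u << (4 * digits))) {
    digits++;
  }
  return digits;
}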
@ -3391,7 +3391,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
char starts_bundle = ' ';
C->regalloc()->dump_frame();
Node *n = NULL;
Node *n = nullptr;
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
if (VMThread::should_terminate()) {
cut_short = true;
@ -3402,7 +3402,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
continue;
}
n = block->head();
if ((pcs != NULL) && (n->_idx < pc_limit)) {
if ((pcs != nullptr) && (n->_idx < pc_limit)) {
pc = pcs[n->_idx];
st->print("%*.*x", pc_digits, pc_digits, pc);
}
@ -3417,7 +3417,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
}
// For all instructions
Node *delay = NULL;
Node *delay = nullptr;
for (uint j = 0; j < block->number_of_nodes(); j++) {
if (VMThread::should_terminate()) {
cut_short = true;
@ -3449,7 +3449,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
!n->is_top() && // Debug info table constants
!(n->is_Con() && !n->is_Mach())// Debug info table constants
) {
if ((pcs != NULL) && (n->_idx < pc_limit)) {
if ((pcs != nullptr) && (n->_idx < pc_limit)) {
pc = pcs[n->_idx];
st->print("%*.*x", pc_digits, pc_digits, pc);
} else {
@ -3466,12 +3466,12 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
// then back up and print it
if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
// Coverity finding - Explicit null dereferenced.
guarantee(delay != NULL, "no unconditional delay instruction");
guarantee(delay != nullptr, "no unconditional delay instruction");
if (WizardMode) delay->dump();
if (node_bundling(delay)->starts_bundle())
starts_bundle = '+';
if ((pcs != NULL) && (n->_idx < pc_limit)) {
if ((pcs != nullptr) && (n->_idx < pc_limit)) {
pc = pcs[n->_idx];
st->print("%*.*x", pc_digits, pc_digits, pc);
} else {
@ -3482,7 +3482,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
st->fill_to(prefix_len);
delay->format(C->regalloc(), st);
st->cr();
delay = NULL;
delay = nullptr;
}
// Dump the exception table as well
@ -3493,7 +3493,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
st->bol(); // Make sure we start on a new line
}
st->cr(); // one empty line between blocks
assert(cut_short || delay == NULL, "no unconditional delay branch");
assert(cut_short || delay == nullptr, "no unconditional delay branch");
} // End of per-block dump
if (cut_short) st->print_cr("*** disassembly is cut short ***");


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,7 +140,7 @@ public:
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
void shorten_branches(uint* blk_starts);
// If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
// If "objs" contains an ObjectValue whose id is "id", returns it, else null.
static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs, ObjectValue* sv);
void FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,


@ -188,14 +188,14 @@ class Parse : public GraphKit {
void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }
// True after any predecessor flows control into this block
bool is_merged() const { return _start_map != NULL; }
bool is_merged() const { return _start_map != nullptr; }
#ifdef ASSERT
// True after backedge predecessor flows control into this block
bool has_merged_backedge() const { return _has_merged_backedge; }
void mark_merged_backedge(Block* pred) {
assert(is_SEL_head(), "should be loop head");
if (pred != NULL && is_SEL_backedge(pred)) {
if (pred != nullptr && is_SEL_backedge(pred)) {
assert(is_parsed(), "block should be parsed before merging backedges");
_has_merged_backedge = true;
}
@ -285,7 +285,7 @@ class Parse : public GraphKit {
// path number ("pnum").
int add_new_path();
// Initialize me by recording the parser's map. My own map must be NULL.
// Initialize me by recording the parser's map. My own map must be null.
void record_state(Parse* outer);
};
@ -405,7 +405,7 @@ class Parse : public GraphKit {
void set_wrote_fields(bool z) { _wrote_fields = z; }
Node* alloc_with_final() const { return _alloc_with_final; }
void set_alloc_with_final(Node* n) {
assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?");
_alloc_with_final = n;
}
@ -432,7 +432,7 @@ class Parse : public GraphKit {
Block* start_block() {
return rpo_at(flow()->start_block()->rpo());
}
// Can return NULL if the flow pass did not complete a block.
// Can return null if the flow pass did not complete a block.
Block* successor_for_bci(int bci) {
return block()->successor_for_bci(bci);
}
@ -631,7 +631,7 @@ class UnstableIfTrap {
public:
UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path): _unc(call), _modified(false) {
assert(_unc != NULL && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if,
"invalid uncommon_trap call!");
_next_bci = path != nullptr ? path->start() : -1;
}


@ -78,7 +78,7 @@ void Parse::print_statistics() {
tty->print_cr("Blocks parsed: %d Blocks seen: %d", blocks_parsed, blocks_seen);
if (explicit_null_checks_inserted) {
tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,",
tty->print_cr("%d original null checks - %d elided (%2d%%); optimizer leaves %d,",
explicit_null_checks_inserted, explicit_null_checks_elided,
(100*explicit_null_checks_elided)/explicit_null_checks_inserted,
all_null_checks_found);
@ -112,7 +112,7 @@ Node *Parse::fetch_interpreter_state(int index,
// Very similar to LoadNode::make, except we handle un-aligned longs and
// doubles on Sparc. Intel can handle them just fine directly.
Node *l = NULL;
Node *l = nullptr;
switch (bt) { // Signature is flattened
case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
@ -152,7 +152,7 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type,
// TypeFlow may assert null-ness if a type appears unloaded.
if (type == TypePtr::NULL_PTR ||
(tp != NULL && !tp->is_loaded())) {
(tp != nullptr && !tp->is_loaded())) {
// Value must be null, not a real oop.
Node* chk = _gvn.transform( new CmpPNode(l, null()) );
Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
@ -168,9 +168,9 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type,
// When paths are cut off, values at later merge points can rise
// toward more specific classes. Make sure these specific classes
// are still in effect.
if (tp != NULL && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
// TypeFlow asserted a specific object type. Value must have that type.
Node* bad_type_ctrl = NULL;
Node* bad_type_ctrl = nullptr;
l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
bad_type_exit->control()->add_req(bad_type_ctrl);
}
@ -270,7 +270,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
const Type *type = osr_block->local_type_at(index);
if (type->isa_oopptr() != NULL) {
if (type->isa_oopptr() != nullptr) {
// 6403625: Verify that the interpreter oopMap thinks that the oop is live
// else we might load a stale oop if the MethodLiveness disagrees with the
@ -279,7 +279,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
//
if (!live_oops.at(index)) {
if (C->log() != NULL) {
if (C->log() != nullptr) {
C->log()->elem("OSR_mismatch local_index='%d'",index);
}
set_local(index, null());
@ -301,7 +301,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
// Construct code to access the appropriate local.
BasicType bt = type->basic_type();
if (type == TypePtr::NULL_PTR) {
// Ptr types are mixed together with T_ADDRESS but NULL is
// Ptr types are mixed together with T_ADDRESS but null is
// really for T_OBJECT types so correct it.
bt = T_OBJECT;
}
@ -336,7 +336,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
Node* l = local(index);
if (l->is_top()) continue; // nothing here
const Type *type = osr_block->local_type_at(index);
if (type->isa_oopptr() != NULL) {
if (type->isa_oopptr() != nullptr) {
if (!live_oops.at(index)) {
// skip type check for dead oops
continue;
@ -398,10 +398,10 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
_wrote_volatile = false;
_wrote_stable = false;
_wrote_fields = false;
_alloc_with_final = NULL;
_alloc_with_final = nullptr;
_entry_bci = InvocationEntryBci;
_tf = NULL;
_block = NULL;
_tf = nullptr;
_block = nullptr;
_first_return = true;
_replaced_nodes_for_exceptions = false;
_new_idx = C->unique();
@ -451,7 +451,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
}
CompileLog* log = C->log();
if (log != NULL) {
if (log != nullptr) {
log->begin_head("parse method='%d' uses='%f'",
log->identify(parse_method), expected_uses);
if (depth() == 1 && C->is_osr_compilation()) {
@ -477,7 +477,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
if (total_count < old_count || total_count < md_count)
total_count = (uint)-1;
C->set_trap_count(reason, total_count);
if (log != NULL)
if (log != nullptr)
log->elem("observe trap='%s' count='%d' total='%d'",
Deoptimization::trap_reason_name(reason),
md_count, total_count);
@ -486,11 +486,11 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
// Accumulate total sum of decompilations, also.
C->set_decompile_count(C->decompile_count() + md->decompile_count());
if (log != NULL && method()->has_exception_handlers()) {
if (log != nullptr && method()->has_exception_handlers()) {
log->elem("observe that='has_exception_handlers'");
}
assert(InlineTree::check_can_parse(method()) == NULL, "Can not parse this method, cutout earlier");
assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
// Always register dependence if JVMTI is enabled, because
@ -556,7 +556,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
SafePointNode* entry_map = create_entry_map();
// Check for bailouts during map initialization
if (failing() || entry_map == NULL) {
if (failing() || entry_map == nullptr) {
if (log) log->done("parse");
return;
}
@ -812,7 +812,7 @@ void Parse::build_exits() {
//----------------------------build_start_state-------------------------------
// Construct a state which contains only the incoming arguments from an
// unknown caller. The method & bci will be NULL & InvocationEntryBci.
// unknown caller. The method & bci will be null & InvocationEntryBci.
JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
int arg_size = tf->domain()->cnt();
int max_size = MAX2(arg_size, (int)tf->range()->cnt());
@ -821,7 +821,7 @@ JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
record_for_igvn(map);
assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
Node_Notes* old_nn = default_node_notes();
if (old_nn != NULL && has_method()) {
if (old_nn != nullptr && has_method()) {
Node_Notes* entry_nn = old_nn->clone(this);
JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
entry_jvms->set_offsets(0);
@ -847,7 +847,7 @@ JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
//-----------------------------make_node_notes---------------------------------
Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
if (caller_nn == NULL) return NULL;
if (caller_nn == nullptr) return nullptr;
Node_Notes* nn = caller_nn->clone(C);
JVMState* caller_jvms = nn->jvms();
JVMState* jvms = new (C) JVMState(method(), caller_jvms);
@ -909,14 +909,14 @@ void Parse::do_exceptions() {
if (failing()) {
// Pop them all off and throw them away.
while (pop_exception_state() != NULL) ;
while (pop_exception_state() != nullptr) ;
return;
}
PreserveJVMState pjvms(this, false);
SafePointNode* ex_map;
while ((ex_map = pop_exception_state()) != NULL) {
while ((ex_map = pop_exception_state()) != nullptr) {
if (!method()->has_exception_handlers()) {
// Common case: Transfer control outward.
// Doing it this early allows the exceptions to common up
@ -1069,7 +1069,7 @@ void Parse::do_exits() {
SafePointNode* normal_map = kit.map(); // keep this guy safe
// Now re-collect the exceptions into _exits:
SafePointNode* ex_map;
while ((ex_map = kit.pop_exception_state()) != NULL) {
while ((ex_map = kit.pop_exception_state()) != nullptr) {
Node* ex_oop = kit.use_exception_state(ex_map);
// Force the exiting JVM state to have this method at InvocationEntryBci.
// The exiting JVM state is otherwise a copy of the calling JVMS.
@ -1104,7 +1104,7 @@ void Parse::do_exits() {
// Capture very early exceptions (receiver null checks) from caller JVMS
GraphKit caller(_caller);
SafePointNode* ex_map;
while ((ex_map = caller.pop_exception_state()) != NULL) {
while ((ex_map = caller.pop_exception_state()) != nullptr) {
_exits.add_exception_state(ex_map);
}
}
@ -1120,7 +1120,7 @@ SafePointNode* Parse::create_entry_map() {
uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
if (len >= 32760) {
C->record_method_not_compilable("too many local variables");
return NULL;
return nullptr;
}
// clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
@ -1134,21 +1134,21 @@ SafePointNode* Parse::create_entry_map() {
if (kit.stopped()) {
_exits.add_exception_states_from(_caller);
_exits.set_jvms(_caller);
return NULL;
return nullptr;
}
}
assert(method() != NULL, "parser must have a method");
assert(method() != nullptr, "parser must have a method");
// Create an initial safepoint to hold JVM state during parsing
JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
set_map(new SafePointNode(len, jvms));
jvms->set_map(map());
record_for_igvn(map());
assert(jvms->endoff() == len, "correct jvms sizing");
SafePointNode* inmap = _caller->map();
assert(inmap != NULL, "must have inmap");
assert(inmap != nullptr, "must have inmap");
// In case of null check on receiver above
map()->transfer_replaced_nodes_from(inmap, _new_idx);
@ -1204,7 +1204,7 @@ void Parse::do_method_entry() {
Node* receiver_obj = local(0);
const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
if (receiver_type != NULL && !receiver_type->higher_equal(holder_type)) {
if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
// Receiver should always be a subtype of callee holder.
// But, since C2's type system doesn't properly track interfaces,
// the invariant can't be expressed in the type system for default methods.
@ -1234,7 +1234,7 @@ void Parse::do_method_entry() {
// FastLockNode becomes the new control parent to pin it to the start.
// Setup Object Pointer
Node *lock_obj = NULL;
Node *lock_obj = nullptr;
if (method()->is_static()) {
ciInstance* mirror = _method->holder()->java_mirror();
const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
@ -1282,11 +1282,11 @@ Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
_is_parsed = false;
_is_handler = false;
_has_merged_backedge = false;
_start_map = NULL;
_start_map = nullptr;
_has_predicates = false;
_num_successors = 0;
_all_successors = 0;
_successors = NULL;
_successors = nullptr;
assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
assert(_live_locals.size() == 0, "sanity");
@ -1305,7 +1305,7 @@ void Parse::Block::init_graph(Parse* outer) {
int ne = tfe->length();
_num_successors = ns;
_all_successors = ns+ne;
_successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne);
_successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
int p = 0;
for (int i = 0; i < ns+ne; i++) {
ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
@ -1348,7 +1348,7 @@ Parse::Block* Parse::Block::successor_for_bci(int bci) {
// of bytecodes. For example, "obj.field = null" is executable even
// if the field's type is an unloaded class; the flow pass used to
// make a trap for such code.
return NULL;
return nullptr;
}
@ -1533,7 +1533,7 @@ void Parse::do_one_block() {
}
assert(bci() < block()->limit(), "bci still in block");
if (log != NULL) {
if (log != nullptr) {
// Output an optional context marker, to help place actions
// that occur during parsing of this BC. If there is no log
// output until the next context string, this context string
@ -1568,7 +1568,7 @@ void Parse::do_one_block() {
NOT_PRODUCT( parse_histogram()->record_change(); );
if (log != NULL)
if (log != nullptr)
log->clear_context(); // skip marker if nothing was printed
// Fall into next bytecode. Each bytecode normally has 1 sequential
@ -1583,7 +1583,7 @@ void Parse::do_one_block() {
void Parse::set_parse_bci(int bci) {
set_bci(bci);
Node_Notes* nn = C->default_node_notes();
if (nn == NULL) return;
if (nn == nullptr) return;
// Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
if (!DebugInlinedCalls && depth() > 1) {
@ -1592,7 +1592,7 @@ void Parse::set_parse_bci(int bci) {
// Update the JVMS annotation, if present.
JVMState* jvms = nn->jvms();
if (jvms != NULL && jvms->bci() != bci) {
if (jvms != nullptr && jvms->bci() != bci) {
// Update the JVMS.
jvms = jvms->clone_shallow(C);
jvms->set_bci(bci);
@ -1604,7 +1604,7 @@ void Parse::set_parse_bci(int bci) {
// Merge the current mapping into the basic block starting at bci
void Parse::merge(int target_bci) {
Block* target = successor_for_bci(target_bci);
if (target == NULL) { handle_missing_successor(target_bci); return; }
if (target == nullptr) { handle_missing_successor(target_bci); return; }
assert(!target->is_ready(), "our arrival must be expected");
int pnum = target->next_path_num();
merge_common(target, pnum);
@ -1614,7 +1614,7 @@ void Parse::merge(int target_bci) {
// Merge the current mapping into the basic block, using a new path
void Parse::merge_new_path(int target_bci) {
Block* target = successor_for_bci(target_bci);
if (target == NULL) { handle_missing_successor(target_bci); return; }
if (target == nullptr) { handle_missing_successor(target_bci); return; }
assert(!target->is_ready(), "new path into frozen graph");
int pnum = target->add_new_path();
merge_common(target, pnum);
@ -1631,7 +1631,7 @@ void Parse::merge_exception(int target_bci) {
#endif
assert(sp() == 1, "must have only the throw exception on the stack");
Block* target = successor_for_bci(target_bci);
if (target == NULL) { handle_missing_successor(target_bci); return; }
if (target == nullptr) { handle_missing_successor(target_bci); return; }
assert(target->is_handler(), "exceptions are handled by special blocks");
int pnum = target->add_new_path();
merge_common(target, pnum);
@ -1692,8 +1692,8 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
RegionNode *r = new RegionNode(edges+1);
gvn().set_type(r, Type::CONTROL);
record_for_igvn(r);
// zap all inputs to NULL for debugging (done in Node(uint) constructor)
// for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
// zap all inputs to null for debugging (done in Node(uint) constructor)
// for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
r->init_req(pnum, control());
set_control(r);
target->copy_irreducible_status_to(r, jvms());
@ -1753,7 +1753,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
if (m->is_Phi() && m->as_Phi()->region() == r)
phi = m->as_Phi();
else
phi = NULL;
phi = nullptr;
if (m != n) { // Different; must merge
switch (j) {
// Frame pointer and Return Address never changes
@ -1761,11 +1761,11 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
case TypeFunc::ReturnAdr:
break;
case TypeFunc::Memory: // Merge inputs to the MergeMem node
assert(phi == NULL, "the merge contains phis, not vice versa");
assert(phi == nullptr, "the merge contains phis, not vice versa");
merge_memory_edges(n->as_MergeMem(), pnum, nophi);
continue;
default: // All normal stuff
if (phi == NULL) {
if (phi == nullptr) {
const JVMState* jvms = map()->jvms();
if (EliminateNestedLocks &&
jvms->is_mon(j) && jvms->is_monitor_box(j)) {
@ -1787,7 +1787,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
// - the corresponding control edge is top (a dead incoming path)
// It is a bug if we create a phi which sees a garbage value on a live path.
if (phi != NULL) {
if (phi != nullptr) {
assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
assert(phi->region() == r, "");
phi->set_req(pnum, n); // Then add 'n' to the merge
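The merge logic above is the standard SSA construction pattern: a Region collects the incoming control edges, and every value that differs across predecessors gets a Phi whose input i corresponds to the Region's input i. A compact sketch of that invariant over simplified node types (not C2's real classes):

#include <vector>
#include <cassert>

struct Node { std::vector<Node*> in; };   // inputs, pre-sized to pred count
struct Region : Node {};
struct Phi : Node { Region* region; };

// Wire predecessor 'pnum' of a merge point: control goes into the Region,
// and the merged value goes into the same slot of its Phi. Keeping the
// slot numbers aligned is the whole invariant.
void set_merge_input(Region* r, Phi* phi, int pnum, Node* ctrl, Node* val) {
  assert(phi->region == r && phi->in.size() == r->in.size());
  r->in[pnum] = ctrl;
  phi->in[pnum] = val;
}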
@ -1833,15 +1833,15 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
//--------------------------merge_memory_edges---------------------------------
void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
// (nophi means we must not create phis, because we already parsed here)
assert(n != NULL, "");
assert(n != nullptr, "");
// Merge the inputs to the MergeMems
MergeMemNode* m = merged_memory();
assert(control()->is_Region(), "must be merging to a region");
RegionNode* r = control()->as_Region();
PhiNode* base = NULL;
MergeMemNode* remerge = NULL;
PhiNode* base = nullptr;
MergeMemNode* remerge = nullptr;
for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
Node *p = mms.force_memory();
Node *q = mms.memory2();
@ -1849,9 +1849,9 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
// Trouble: No new splits allowed after a loop body is parsed.
// Instead, wire the new split into a MergeMem on the backedge.
// The optimizer will sort it out, slicing the phi.
if (remerge == NULL) {
guarantee(base != NULL, "");
assert(base->in(0) != NULL, "should not be xformed away");
if (remerge == nullptr) {
guarantee(base != nullptr, "");
assert(base->in(0) != nullptr, "should not be xformed away");
remerge = MergeMemNode::make(base->in(pnum));
gvn().set_type(remerge, Type::MEMORY);
base->set_req(pnum, remerge);
@ -1867,10 +1867,10 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
if (p->is_Phi() && p->as_Phi()->region() == r)
phi = p->as_Phi();
else
phi = NULL;
phi = nullptr;
}
// Insert q into local phi
if (phi != NULL) {
if (phi != nullptr) {
assert(phi->region() == r, "");
p = phi;
phi->set_req(pnum, q);
@ -1884,7 +1884,7 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
}
}
// Transform base last, in case we must fiddle with remerging.
if (base != NULL && pnum == 1) {
if (base != nullptr && pnum == 1) {
record_for_igvn(base);
m->set_base_memory( _gvn.transform_no_reclaim(base) );
}
@ -1943,7 +1943,7 @@ int Parse::Block::add_new_path() {
// Add new path to the region.
uint pnum = r->req();
r->add_req(NULL);
r->add_req(nullptr);
for (uint i = 1; i < map->req(); i++) {
Node* n = map->in(i);
@ -1953,13 +1953,13 @@ int Parse::Block::add_new_path() {
Node* phi = mms.memory();
if (phi->is_Phi() && phi->as_Phi()->region() == r) {
assert(phi->req() == pnum, "must be same size as region");
phi->add_req(NULL);
phi->add_req(nullptr);
}
}
} else {
if (n->is_Phi() && n->as_Phi()->region() == r) {
assert(n->req() == pnum, "must be same size as region");
n->add_req(NULL);
n->add_req(nullptr);
}
}
}
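add_new_path is the other half of the invariant from the region/phi sketch above: when the Region grows a new input slot, every Phi hanging off it must grow in lock-step, which is why both get an add_req(nullptr) here. In the same simplified types:

// Grow a merge point by one not-yet-known predecessor; the new slots stay
// null until the real control and value edges are filled in.
int add_new_path(Region* r, std::vector<Phi*>& phis) {
  int pnum = static_cast<int>(r->in.size());
  r->in.push_back(nullptr);
  for (Phi* phi : phis) {
    assert(phi->in.size() == r->in.size() - 1 && "must be same size as region");
    phi->in.push_back(nullptr);
  }
  return pnum;
}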
@ -1975,9 +1975,9 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
assert(region->is_Region(), "");
Node* o = map->in(idx);
assert(o != NULL, "");
assert(o != nullptr, "");
if (o == top()) return NULL; // TOP always merges into TOP
if (o == top()) return nullptr; // TOP always merges into TOP
if (o->is_Phi() && o->as_Phi()->region() == region) {
return o->as_Phi();
@ -1986,7 +1986,7 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
// Now use a Phi here for merging
assert(!nocreate, "Cannot build a phi for a block already parsed.");
const JVMState* jvms = map->jvms();
const Type* t = NULL;
const Type* t = nullptr;
if (jvms->is_loc(idx)) {
t = block()->local_type_at(idx - jvms->locoff());
} else if (jvms->is_stk(idx)) {
@ -2005,14 +2005,14 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
// makes it go dead.
if (t == Type::BOTTOM) {
map->set_req(idx, top());
return NULL;
return nullptr;
}
// Do not create phis for top either.
// A top on a non-null control flow must be an unused value even after the phi.
if (t == Type::TOP || t == Type::HALF) {
map->set_req(idx, top());
return NULL;
return nullptr;
}
PhiNode* phi = PhiNode::make(region, o, t);
@ -2030,7 +2030,7 @@ PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
assert(region->is_Region(), "");
Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
assert(o != NULL && o != top(), "");
assert(o != nullptr && o != top(), "");
PhiNode* phi;
if (o->is_Phi() && o->as_Phi()->region() == region) {
@ -2064,11 +2064,11 @@ PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
// class need finalization.
void Parse::call_register_finalizer() {
Node* receiver = local(0);
assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL,
assert(receiver != nullptr && receiver->bottom_type()->isa_instptr() != nullptr,
"must have non-null instance type");
const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
if (tinst != NULL && tinst->is_loaded() && !tinst->klass_is_exact()) {
if (tinst != nullptr && tinst->is_loaded() && !tinst->klass_is_exact()) {
// The type isn't known exactly so see if CHA tells us anything.
ciInstanceKlass* ik = tinst->instance_klass();
if (!Dependencies::has_finalizable_subclass(ik)) {
@ -2082,10 +2082,10 @@ void Parse::call_register_finalizer() {
// finalization. In general this will fold up since the concrete
// class is often visible so the access flags are constant.
Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
Node* mask = _gvn.transform(new AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
Node* check = _gvn.transform(new CmpINode(mask, intcon(0)));
@ -2108,7 +2108,7 @@ void Parse::call_register_finalizer() {
Node *call = make_runtime_call(RC_NO_LEAF,
OptoRuntime::register_finalizer_Type(),
OptoRuntime::register_finalizer_Java(),
NULL, TypePtr::BOTTOM,
nullptr, TypePtr::BOTTOM,
receiver);
make_slow_call_ex(call, env()->Throwable_klass(), true);
@ -2212,7 +2212,7 @@ void Parse::return_current(Node* value) {
}
// frame pointer is always same, already captured
if (value != NULL) {
if (value != nullptr) {
// If returning oops to an interface-return, there is a silent free
// cast from oop to interface allowed by the Verifier. Make it explicit
// here.
@ -2239,7 +2239,7 @@ void Parse::add_safepoint() {
kill_dead_locals();
// Clone the JVM State
SafePointNode *sfpnt = new SafePointNode(parms, NULL);
SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
// Capture memory state BEFORE a SafePoint. Since we can block at a
// SafePoint we need our GC state to be safe; i.e. we need all our current
@ -2281,7 +2281,7 @@ void Parse::add_safepoint() {
// Provide an edge from root to safepoint. This makes the safepoint
// appear useful until the parse has completed.
if (transformed_sfpnt->is_SafePoint()) {
assert(C->root() != NULL, "Expect parse is still valid");
assert(C->root() != nullptr, "Expect parse is still valid");
C->root()->add_prec(transformed_sfpnt);
}
}
@ -2289,8 +2289,8 @@ void Parse::add_safepoint() {
#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
InlineTree* ilt = NULL;
if (C->ilt() != NULL) {
InlineTree* ilt = nullptr;
if (C->ilt() != nullptr) {
JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
}
@ -2355,7 +2355,7 @@ void Parse::show_parse_info() {
//------------------------------dump-------------------------------------------
// Dump information associated with the bytecodes of current _method
void Parse::dump() {
if( method() != NULL ) {
if( method() != nullptr ) {
// Iterate over bytecodes
ciBytecodeStream iter(method());
for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {

View File

@ -140,7 +140,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
bool need_range_check = true;
if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
need_range_check = false;
if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
if (C->log() != nullptr) C->log()->elem("observe that='!need_range_check'");
}
if (!arytype->is_loaded()) {
@ -190,7 +190,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
// See IfNode::Ideal, is_range_check, adjust_check.
uncommon_trap(Deoptimization::Reason_range_check,
Deoptimization::Action_make_not_entrant,
NULL, "range_check");
nullptr, "range_check");
} else {
// If we have already recompiled with the range-check-widening
// heroic optimization turned off, then we must really be throwing
@ -234,7 +234,7 @@ void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
repush_if_args();
uncommon_trap(Deoptimization::Reason_unstable_if,
Deoptimization::Action_reinterpret,
NULL,
nullptr,
"taken always");
} else {
assert(dest_bci_if_true != never_reached, "inconsistent dest");
@ -256,7 +256,7 @@ void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
repush_if_args();
uncommon_trap(Deoptimization::Reason_unstable_if,
Deoptimization::Action_reinterpret,
NULL,
nullptr,
"taken never");
} else {
assert(dest_bci_if_true != never_reached, "inconsistent dest");
@ -275,7 +275,7 @@ void Parse::jump_if_always_fork(int dest_bci, bool unc) {
repush_if_args();
uncommon_trap(Deoptimization::Reason_unstable_if,
Deoptimization::Action_reinterpret,
NULL,
nullptr,
"taken never");
} else {
assert(dest_bci != never_reached, "inconsistent dest");
@ -424,10 +424,10 @@ void Parse::do_tableswitch() {
}
ciMethodData* methodData = method()->method_data();
ciMultiBranchData* profile = NULL;
ciMultiBranchData* profile = nullptr;
if (methodData->is_mature() && UseSwitchProfiling) {
ciProfileData* data = methodData->bci_to_data(bci());
if (data != NULL && data->is_MultiBranchData()) {
if (data != nullptr && data->is_MultiBranchData()) {
profile = (ciMultiBranchData*)data;
}
}
@ -440,7 +440,7 @@ void Parse::do_tableswitch() {
int rp = -1;
if (lo_index != min_jint) {
float cnt = 1.0F;
if (profile != NULL) {
if (profile != nullptr) {
cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
}
ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
@ -450,7 +450,7 @@ void Parse::do_tableswitch() {
int dest = iter().get_dest_table(j+3);
makes_backward_branch |= (dest <= bci());
float cnt = 1.0F;
if (profile != NULL) {
if (profile != nullptr) {
cnt = (float)profile->count_at(j);
}
if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
@ -461,7 +461,7 @@ void Parse::do_tableswitch() {
assert(ranges[rp].hi() == highest, "");
if (highest != max_jint) {
float cnt = 1.0F;
if (profile != NULL) {
if (profile != nullptr) {
cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
}
if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
@ -498,10 +498,10 @@ void Parse::do_lookupswitch() {
}
ciMethodData* methodData = method()->method_data();
ciMultiBranchData* profile = NULL;
ciMultiBranchData* profile = nullptr;
if (methodData->is_mature() && UseSwitchProfiling) {
ciProfileData* data = methodData->bci_to_data(bci());
if (data != NULL && data->is_MultiBranchData()) {
if (data != nullptr && data->is_MultiBranchData()) {
profile = (ciMultiBranchData*)data;
}
}
@ -514,13 +514,13 @@ void Parse::do_lookupswitch() {
table[3*j+0] = iter().get_int_table(2+2*j);
table[3*j+1] = iter().get_dest_table(2+2*j+1);
// Handle overflow when converting from uint to jint
table[3*j+2] = (profile == NULL) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
}
qsort(table, len, 3*sizeof(table[0]), jint_cmp);
}
float default_cnt = 1.0F;
if (profile != NULL) {
if (profile != nullptr) {
juint defaults = max_juint - len;
default_cnt = (float)profile->default_count()/(float)defaults;
}
@ -605,12 +605,12 @@ public:
} _state;
SwitchRanges(SwitchRange *lo, SwitchRange *hi)
: _lo(lo), _hi(hi), _mid(NULL),
: _lo(lo), _hi(hi), _mid(nullptr),
_cost(0), _state(Start) {
}
SwitchRanges()
: _lo(NULL), _hi(NULL), _mid(NULL),
: _lo(nullptr), _hi(nullptr), _mid(nullptr),
_cost(0), _state(Start) {}
};
@ -624,7 +624,7 @@ static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt
do {
SwitchRanges& r = *tree.adr_at(tree.length()-1);
if (r._hi != r._lo) {
if (r._mid == NULL) {
if (r._mid == nullptr) {
float r_cnt = sum_of_cnts(r._lo, r._hi);
if (r_cnt == 0) {
@ -633,7 +633,7 @@ static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt
continue;
}
SwitchRange* mid = NULL;
SwitchRange* mid = nullptr;
mid = r._lo;
for (float cnt = 0; ; ) {
assert(mid <= r._hi, "out of bounds");
@ -682,7 +682,7 @@ void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchR
SwitchRange* array1 = lo;
SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
SwitchRange* ranges = NULL;
SwitchRange* ranges = nullptr;
while (nr >= 2) {
assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
@ -878,15 +878,15 @@ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi)
}
ciMethodData* methodData = method()->method_data();
ciMultiBranchData* profile = NULL;
ciMultiBranchData* profile = nullptr;
if (methodData->is_mature()) {
ciProfileData* data = methodData->bci_to_data(bci());
if (data != NULL && data->is_MultiBranchData()) {
if (data != nullptr && data->is_MultiBranchData()) {
profile = (ciMultiBranchData*)data;
}
}
Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));
Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
// These are the switch destinations hanging off the jumpnode
i = 0;
@ -940,7 +940,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi,
jint min_val = min_jint;
jint max_val = max_jint;
const TypeInt* ti = key_val->bottom_type()->isa_int();
if (ti != NULL) {
if (ti != nullptr) {
min_val = ti->_lo;
max_val = ti->_hi;
assert(min_val <= max_val, "invalid int type");
@ -977,7 +977,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi,
if (create_jump_tables(key_val, lo, hi)) return;
SwitchRange* mid = NULL;
SwitchRange* mid = nullptr;
float total_cnt = sum_of_cnts(lo, hi);
int nr = hi - lo + 1;
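When no jump table is created, the ranges are lowered to a binary tree of compares, and the interesting choice is where to split: rather than the positional middle, the code walks the ranges until roughly half of the profiled execution count sits on each side. A sketch of that count-weighted median selection over a simplified SwitchRange:

struct SwitchRange { float cnt; /* lo, hi, dest elided */ };

// Pick the split point so the profile weight is roughly balanced on both
// sides, instead of splitting by position.
SwitchRange* weighted_mid(SwitchRange* lo, SwitchRange* hi, float total_cnt) {
  float acc = 0.0f;
  for (SwitchRange* r = lo; r < hi; r++) {
    acc += r->cnt;
    if (acc >= total_cnt / 2) {
      return r;   // about half of the executions fall at or below r
    }
  }
  return hi;
}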
@ -1101,7 +1101,7 @@ void Parse::modf() {
Node *f1 = pop();
Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
CAST_FROM_FN_PTR(address, SharedRuntime::frem),
"frem", NULL, //no memory effects
"frem", nullptr, //no memory effects
f1, f2);
Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
@ -1113,7 +1113,7 @@ void Parse::modd() {
Node *d1 = pop_pair();
Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
CAST_FROM_FN_PTR(address, SharedRuntime::drem),
"drem", NULL, //no memory effects
"drem", nullptr, //no memory effects
d1, top(), d2, top());
Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
@ -1130,7 +1130,7 @@ void Parse::l2f() {
Node* f1 = pop();
Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
"l2f", NULL, //no memory effects
"l2f", nullptr, //no memory effects
f1, f2);
Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
@ -1212,7 +1212,7 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t
ciMethodData* methodData = method()->method_data();
if (!methodData->is_mature()) return PROB_UNKNOWN;
ciProfileData* data = methodData->bci_to_data(bci());
if (data == NULL) {
if (data == nullptr) {
return PROB_UNKNOWN;
}
if (!data->is_JumpData()) return PROB_UNKNOWN;
@ -1232,7 +1232,7 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t
// Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
// We also check that the individual counters are non-negative first; otherwise
// the sum of two overflowed (negative) counters could wrap around to a positive
// value and slip past the check.
if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
if (C->log() != NULL) {
if (C->log() != nullptr) {
C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
}
return PROB_UNKNOWN;
@ -1262,12 +1262,12 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t
assert((cnt > 0.0f) && (prob > 0.0f),
"Bad frequency assignment in if");
if (C->log() != NULL) {
const char* prob_str = NULL;
if (C->log() != nullptr) {
const char* prob_str = nullptr;
if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
char prob_str_buf[30];
if (prob_str == NULL) {
if (prob_str == nullptr) {
jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
prob_str = prob_str_buf;
}
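Everything above reduces to prob = taken / (taken + not_taken), guarded against immature or overflowed counters and clamped so the result never reaches exactly 0 or 1 (the untaken path must stay reachable). A worked sketch; the clamp values are illustrative, not the real PROB_MIN/PROB_MAX constants:

#include <algorithm>

const float PROB_MIN_ILL = 1e-6f;          // illustrative clamps
const float PROB_MAX_ILL = 1.0f - 1e-6f;
const float PROB_UNKNOWN_ILL = -1.0f;

float branch_probability(int taken, int not_taken) {
  // Immature or overflowed profile data: give up.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    return PROB_UNKNOWN_ILL;
  }
  float prob = (float)taken / (float)(taken + not_taken);
  return std::min(std::max(prob, PROB_MIN_ILL), PROB_MAX_ILL);
}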
@ -1306,7 +1306,7 @@ float Parse::branch_prediction(float& cnt,
// of the OSR-ed method, and we want to deopt to gather more stats.
// If you have ANY counts, then this loop is simply 'cold' relative
// to the OSR loop.
if (data == NULL ||
if (data == nullptr ||
(data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
// This is the only way to return PROB_UNKNOWN:
return PROB_UNKNOWN;
@ -1356,8 +1356,8 @@ inline int Parse::repush_if_args() {
int bc_depth = - Bytecodes::depth(iter().cur_bc());
assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
assert(argument(0) != NULL, "must exist");
assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
assert(argument(0) != nullptr, "must exist");
assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
inc_sp(bc_depth);
return bc_depth;
}
@ -1379,7 +1379,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
repush_if_args(); // to gather stats on loop
uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret,
NULL, "cold");
nullptr, "cold");
if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed
branch_block->next_path_num();
@ -1450,7 +1450,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
repush_if_args(); // to gather stats on loop
uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret,
NULL, "cold");
nullptr, "cold");
if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed
branch_block->next_path_num();
@ -1480,7 +1480,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
if (tst->is_Bool()) {
// Refresh c from the transformed bool node, since it may be
// simpler than the original c. Also re-canonicalize btest.
// This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
// This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
// That can arise from statements like: if (x instanceof C) ...
if (tst != tst0) {
// Canonicalize one more time since transform can change it.
@ -1585,7 +1585,7 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block
repush_if_args();
Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
Deoptimization::Action_reinterpret,
NULL,
nullptr,
(is_fallthrough ? "taken always" : "taken never"));
if (call != nullptr) {
@ -1627,25 +1627,25 @@ static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
Node* ldk;
if (n->is_DecodeNKlass()) {
if (n->in(1)->Opcode() != Op_LoadNKlass) {
return NULL;
return nullptr;
} else {
ldk = n->in(1);
}
} else if (n->Opcode() != Op_LoadKlass) {
return NULL;
return nullptr;
} else {
ldk = n;
}
assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
Node* adr = ldk->in(MemNode::Address);
intptr_t off = 0;
Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
return NULL;
if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
return nullptr;
const TypePtr* tp = gvn->type(obj)->is_ptr();
if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
return NULL;
if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
return nullptr;
return obj;
}
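extract_obj_from_klass_load is pure graph pattern matching: peel an optional DecodeNKlass, require a LoadKlass or LoadNKlass underneath, then verify the load's address is obj plus the klass offset. The first half of that match, schematically, over deliberately simplified nodes:

enum Op { DecodeNKlassOp, LoadNKlassOp, LoadKlassOp, OtherOp };

struct SNode {
  Op op;
  SNode* in1;   // first data input, simplified
};

// Returns the klass load feeding 'n', or nullptr when 'n' does not match
// the (DecodeNKlass(LoadNKlass) | LoadKlass) shape.
SNode* match_klass_load(SNode* n) {
  if (n->op == DecodeNKlassOp) {
    return (n->in1 != nullptr && n->in1->op == LoadNKlassOp) ? n->in1 : nullptr;
  }
  return (n->op == LoadKlassOp) ? n : nullptr;
}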
@ -1658,13 +1658,13 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
if (btest == BoolTest::eq && tcon->isa_klassptr()) {
Node* obj = extract_obj_from_klass_load(&_gvn, val);
const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
// Found:
// Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
// or the narrowOop equivalent.
const Type* obj_type = _gvn.type(obj);
const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
tboth->higher_equal(obj_type)) {
// obj has to be of the exact type Foo if the CmpP succeeds.
int obj_in_map = map()->find_edge(obj);
@ -1697,8 +1697,8 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
// Check for a comparison to a constant, and "know" that the compared
// value is constrained on this path.
assert(tcon->singleton(), "");
ConstraintCastNode* ccast = NULL;
Node* cast = NULL;
ConstraintCastNode* ccast = nullptr;
Node* cast = nullptr;
switch (btest) {
case BoolTest::eq: // Constant test?
@ -1735,7 +1735,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
break;
}
if (ccast != NULL) {
if (ccast != nullptr) {
const Type* tcc = ccast->as_Type()->type();
assert(tcc != tval && tcc->higher_equal(tval), "must improve");
// Delay transform() call to allow recovery of pre-cast value
@ -1746,7 +1746,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
cast = ccast;
}
if (cast != NULL) { // Here's the payoff.
if (cast != nullptr) { // Here's the payoff.
replace_in_map(val, cast);
}
}
@ -1766,8 +1766,8 @@ Node* Parse::optimize_cmp_with_klass(Node* c) {
if (c->Opcode() == Op_CmpP &&
(c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
c->in(2)->is_Con()) {
Node* load_klass = NULL;
Node* decode = NULL;
Node* load_klass = nullptr;
Node* decode = nullptr;
if (c->in(1)->Opcode() == Op_DecodeNKlass) {
decode = c->in(1);
load_klass = c->in(1)->in(1);
@ -1778,7 +1778,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) {
Node* addp = load_klass->in(2);
Node* obj = addp->in(AddPNode::Address);
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
if (obj_type->speculative_type_not_null() != NULL) {
if (obj_type->speculative_type_not_null() != nullptr) {
ciKlass* k = obj_type->speculative_type();
inc_sp(2);
obj = maybe_cast_profiled_obj(obj, k);
@ -1788,7 +1788,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) {
load_klass = load_klass->clone();
load_klass->set_req(2, addp);
load_klass = _gvn.transform(load_klass);
if (decode != NULL) {
if (decode != nullptr) {
decode = decode->clone();
decode->set_req(1, load_klass);
load_klass = _gvn.transform(decode);
@ -1875,7 +1875,7 @@ void Parse::do_one_bytecode() {
ciConstant constant = iter().get_constant();
if (constant.is_loaded()) {
const Type* con_type = Type::make_from_constant(constant);
if (con_type != NULL) {
if (con_type != nullptr) {
push_node(con_type->basic_type(), makecon(con_type));
}
} else {
@ -1883,14 +1883,14 @@ void Parse::do_one_bytecode() {
if (iter().is_in_error()) {
uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unhandled,
Deoptimization::Action_none),
NULL, "constant in error state", true /* must_throw */);
nullptr, "constant in error state", true /* must_throw */);
} else {
int index = iter().get_constant_pool_index();
uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unloaded,
Deoptimization::Action_reinterpret,
index),
NULL, "unresolved constant", false /* must_throw */);
nullptr, "unresolved constant", false /* must_throw */);
}
}
break;
@ -2531,17 +2531,17 @@ void Parse::do_one_bytecode() {
case Bytecodes::_i2b:
// Sign extend
a = pop();
a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true);
a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
push(a);
break;
case Bytecodes::_i2s:
a = pop();
a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true);
a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
push(a);
break;
case Bytecodes::_i2c:
a = pop();
a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true);
a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
push(a);
break;
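The i2b/i2s/i2c cases all go through narrow_value, which boils down to the usual shift idiom: move the narrow value's sign bit up to bit 31, then shift back arithmetically for the signed types, or just mask for char, which is unsigned. Concretely (assuming arithmetic right shift on negative ints, as on mainstream targets):

int i2b(int x) { return (x << 24) >> 24; }  // low 8 bits, sign-extended
int i2s(int x) { return (x << 16) >> 16; }  // low 16 bits, sign-extended
int i2c(int x) { return x & 0xFFFF; }       // low 16 bits, zero-extended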
@ -2565,7 +2565,7 @@ void Parse::do_one_bytecode() {
// Exit points of synchronized methods must have an unlock node
case Bytecodes::_return:
return_current(NULL);
return_current(nullptr);
break;
case Bytecodes::_ireturn:
@ -2581,7 +2581,7 @@ void Parse::do_one_bytecode() {
break;
case Bytecodes::_athrow:
// null exception oop throws NULL pointer exception
// null exception oop throws null pointer exception
null_check(peek());
if (stopped()) return;
// Hook the thrown exception directly to subsequent handlers.
@ -2615,7 +2615,7 @@ void Parse::do_one_bytecode() {
ciMethodData* methodData = method()->method_data();
if (!methodData->is_mature()) break;
ciProfileData* data = methodData->bci_to_data(bci());
assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
int taken = ((ciJumpData*)data)->taken();
taken = method()->scale_count(taken);
target_block->set_count(taken);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ void Parse::do_field_access(bool is_get, bool is_field) {
!(method()->holder() == field_holder && method()->is_object_initializer())) {
uncommon_trap(Deoptimization::Reason_unhandled,
Deoptimization::Action_reinterpret,
NULL, "put to call site target field");
nullptr, "put to call site target field");
return;
}
@ -118,7 +118,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
(bt != T_OBJECT || field->type()->is_loaded())) {
// final or stable field
Node* con = make_constant_from_field(field, obj);
if (con != NULL) {
if (con != nullptr) {
push_node(field->layout_type(), con);
return;
}
@ -156,7 +156,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
} else {
type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
}
assert(type != NULL, "field singleton type must be consistent");
assert(type != nullptr, "field singleton type must be consistent");
} else {
type = TypeOopPtr::make_from_klass(field_klass->as_klass());
}
@ -186,7 +186,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
if (PrintOpto && (Verbose || WizardMode)) {
method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
}
if (C->log() != NULL) {
if (C->log() != nullptr) {
C->log()->elem("assert_null reason='field' klass='%d'",
C->log()->identify(field->type()));
}
@ -242,7 +242,7 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// Any method can write a @Stable field; insert memory barriers after those also.
if (field->is_final()) {
set_wrote_final(true);
if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
if (AllocateNode::Ideal_allocation(obj, &_gvn) != nullptr) {
// Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor.
// Can't bind stable with its allocation, only record allocation for final field.
@ -298,7 +298,7 @@ void Parse::do_newarray(BasicType elem_type) {
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
Node* length = lengths[0];
assert(length != NULL, "");
assert(length != nullptr, "");
Node* array = new_array(makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)), length, nargs);
if (ndimensions > 1) {
jint length_con = find_int_con(length, -1);
@ -331,7 +331,7 @@ void Parse::do_multianewarray() {
// get the lengths from the stack (first dimension is on top)
Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
length[ndimensions] = NULL; // terminating null for make_runtime_call
length[ndimensions] = nullptr; // terminating null for make_runtime_call
int j;
for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
@ -356,7 +356,7 @@ void Parse::do_multianewarray() {
// Can use multianewarray instead of [a]newarray if only one dimension,
// or if all non-final dimensions are small constants.
if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
Node* obj = NULL;
Node* obj = nullptr;
// Set the original stack and the reexecute bit for the interpreter
// to reexecute the multianewarray bytecode if deoptimization happens.
// Do it unconditionally even for one dimension multianewarray.
@ -371,7 +371,7 @@ void Parse::do_multianewarray() {
return;
}
address fun = NULL;
address fun = nullptr;
switch (ndimensions) {
case 1: ShouldNotReachHere(); break;
case 2: fun = OptoRuntime::multianewarray2_Java(); break;
@ -379,19 +379,19 @@ void Parse::do_multianewarray() {
case 4: fun = OptoRuntime::multianewarray4_Java(); break;
case 5: fun = OptoRuntime::multianewarray5_Java(); break;
};
Node* c = NULL;
Node* c = nullptr;
if (fun != NULL) {
if (fun != nullptr) {
c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
OptoRuntime::multianewarray_Type(ndimensions),
fun, NULL, TypeRawPtr::BOTTOM,
fun, nullptr, TypeRawPtr::BOTTOM,
makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
length[0], length[1], length[2],
(ndimensions > 2) ? length[3] : NULL,
(ndimensions > 3) ? length[4] : NULL);
(ndimensions > 2) ? length[3] : nullptr,
(ndimensions > 3) ? length[4] : nullptr);
} else {
// Create a java array for dimension sizes
Node* dims = NULL;
Node* dims = nullptr;
{ PreserveReexecuteState preexecs(this);
inc_sp(ndimensions);
Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
@ -406,7 +406,7 @@ void Parse::do_multianewarray() {
c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
OptoRuntime::multianewarrayN_Type(),
OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
OptoRuntime::multianewarrayN_Java(), nullptr, TypeRawPtr::BOTTOM,
makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)),
dims);
}
@ -421,7 +421,7 @@ void Parse::do_multianewarray() {
type = type->is_aryptr()->cast_to_exactness(true);
const TypeInt* ltype = _gvn.find_int_type(length[0]);
if (ltype != NULL)
if (ltype != nullptr)
type = type->is_aryptr()->cast_to_size(ltype);
// We cannot sharpen the nested sub-arrays, since the top level is mutable.


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,11 +69,11 @@ void Parse::do_checkcast() {
Node *obj = peek();
// Throw uncommon trap if class is not loaded or the value we are casting
// _from_ is not loaded, and value is not null. If the value _is_ NULL,
// _from_ is not loaded, and value is not null. If the value _is_ null,
// then the checkcast does nothing.
const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
if (!will_link || (tp && !tp->is_loaded())) {
if (C->log() != NULL) {
if (C->log() != nullptr) {
if (!will_link) {
C->log()->elem("assert_null reason='checkcast' klass='%d'",
C->log()->identify(klass));
@ -113,7 +113,7 @@ void Parse::do_instanceof() {
ciKlass* klass = iter().get_klass(will_link);
if (!will_link) {
if (C->log() != NULL) {
if (C->log() != nullptr) {
C->log()->elem("assert_null reason='instanceof' klass='%d'",
C->log()->identify(klass));
}
@ -157,7 +157,7 @@ void Parse::array_store_check() {
int klass_offset = oopDesc::klass_offset_in_bytes();
Node* p = basic_plus_adr( ary, ary, klass_offset );
// p's type is array-of-OOPS plus klass_offset
Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS));
// Get the array klass
const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
@ -197,7 +197,7 @@ void Parse::array_store_check() {
// Make a constant out of the inexact array klass
const TypeKlassPtr *extak = tak->cast_to_exactness(true);
if (extak->exact_klass(true) != NULL) {
if (extak->exact_klass(true) != nullptr) {
Node* con = makecon(extak);
Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
@ -213,7 +213,7 @@ void Parse::array_store_check() {
// Use the exact constant value we know it is.
replace_in_map(array_klass,con);
CompileLog* log = C->log();
if (log != NULL) {
if (log != nullptr) {
log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
log->identify(extak->exact_klass()));
}
@ -230,7 +230,7 @@ void Parse::array_store_check() {
// We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
// we must set a control edge from the IfTrue node created by the uncommon_trap above to the
// LoadKlassNode.
Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : nullptr,
immutable_memory(), p2, tak));
// Check (the hard way) and throw if not a subklass.
@ -287,8 +287,8 @@ void Parse::do_new() {
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
tty->print_cr("--- Mapping from address types to memory Nodes ---");
MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
map()->memory()->as_MergeMem() : NULL);
MergeMemNode *mem = map() == nullptr ? nullptr : (map()->memory()->is_MergeMem() ?
map()->memory()->as_MergeMem() : nullptr);
for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
C->alias_type(i)->print_on(tty);
tty->print("\t");


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ elapsedTimer Phase::_t_stubCompilation;
elapsedTimer Phase::timers[max_phase_timers];
//------------------------------Phase------------------------------------------
Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) {
Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? nullptr : Compile::current()) {
// Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544).
// This is an effective place to poll, since the compiler is full of phases.
// In particular, every inlining site uses a recursively created Parse phase.


@ -56,7 +56,7 @@ NodeHash::NodeHash(uint est_max_size) :
#endif
{
// _sentinel must be in the current node space
_sentinel = new ProjNode(NULL, TypeFunc::Control);
_sentinel = new ProjNode(nullptr, TypeFunc::Control);
memset(_table,0,sizeof(Node*)*_max);
}
@ -73,7 +73,7 @@ NodeHash::NodeHash(Arena *arena, uint est_max_size) :
#endif
{
// _sentinel must be in the current node space
_sentinel = new ProjNode(NULL, TypeFunc::Control);
_sentinel = new ProjNode(nullptr, TypeFunc::Control);
memset(_table,0,sizeof(Node*)*_max);
}
@ -99,7 +99,7 @@ Node *NodeHash::hash_find( const Node *n ) {
uint hash = n->hash();
if (hash == Node::NO_HASH) {
NOT_PRODUCT( _lookup_misses++ );
return NULL;
return nullptr;
}
uint key = hash & (_max-1);
uint stride = key | 0x01;
@ -107,7 +107,7 @@ Node *NodeHash::hash_find( const Node *n ) {
Node *k = _table[key]; // Get hashed value
if( !k ) { // ?Miss?
NOT_PRODUCT( _lookup_misses++ );
return NULL; // Miss!
return nullptr; // Miss!
}
int op = n->Opcode();
@ -129,11 +129,11 @@ Node *NodeHash::hash_find( const Node *n ) {
k = _table[key]; // Get hashed value
if( !k ) { // ?Miss?
NOT_PRODUCT( _lookup_misses++ );
return NULL; // Miss!
return nullptr; // Miss!
}
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
//------------------------------hash_find_insert-------------------------------
@ -144,7 +144,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
uint hash = n->hash();
if (hash == Node::NO_HASH) {
NOT_PRODUCT( _lookup_misses++ );
return NULL;
return nullptr;
}
uint key = hash & (_max-1);
uint stride = key | 0x01; // stride must be relatively prime to table size
@ -156,7 +156,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
_table[key] = n; // Insert into table!
debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
check_grow(); // Grow table if insert hit limit
return NULL; // Miss!
return nullptr; // Miss!
}
else if( k == _sentinel ) {
first_sentinel = key; // Can insert here
@ -185,7 +185,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
_table[key] = n; // Insert into table!
debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
check_grow(); // Grow table if insert hit limit
return NULL; // Miss!
return nullptr; // Miss!
}
else if( first_sentinel == 0 && k == _sentinel ) {
first_sentinel = key; // Can insert here
@ -193,7 +193,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
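
The probing scheme above depends on _max being a power of two: key = hash & (_max-1) picks the initial slot, and stride = key | 0x01 is odd, hence relatively prime to the table size, so the probe sequence visits every slot exactly once before repeating. A minimal sketch of that full-cycle property under those assumptions:

#include <cstdio>

int main() {
  const unsigned max = 8;              // power-of-two table size
  unsigned hash   = 42;                // any hash value
  unsigned key    = hash & (max - 1);  // initial slot: 2
  unsigned stride = key | 0x01;        // odd stride: 3
  for (unsigned i = 0; i < max; i++) { // prints 2 5 0 3 6 1 4 7: all 8 slots
    std::printf("%u ", key);
    key = (key + stride) & (max - 1);
  }
  std::printf("\n");
  return 0;
}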
//------------------------------hash_insert------------------------------------
@ -235,7 +235,7 @@ bool NodeHash::hash_delete( const Node *n ) {
uint key = hash & (_max-1);
uint stride = key | 0x01;
debug_only( uint counter = 0; );
for( ; /* (k != NULL) && (k != _sentinel) */; ) {
for( ; /* (k != nullptr) && (k != _sentinel) */; ) {
debug_only( counter++ );
NOT_PRODUCT( _delete_probes++ );
k = _table[key]; // Get hashed value
@ -294,7 +294,7 @@ void NodeHash::grow() {
}
//------------------------------clear------------------------------------------
// Clear all entries in _table to NULL but keep storage
// Clear all entries in _table to null but keep storage
void NodeHash::clear() {
#ifdef ASSERT
// Unlock all nodes upon removal from table.
@ -319,7 +319,7 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) {
Node *sentinel_node = sentinel();
for( uint i = 0; i < max; ++i ) {
Node *n = at(i);
if(n != NULL && n != sentinel_node && !useful.test(n->_idx)) {
if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) {
debug_only(n->exit_hash_lock()); // Unlock the node when removed
_table[i] = sentinel_node; // Replace with placeholder
}
@ -335,7 +335,7 @@ void NodeHash::check_no_speculative_types() {
Node *sentinel_node = sentinel();
for (uint i = 0; i < max; ++i) {
Node *n = at(i);
if (n != NULL &&
if (n != nullptr &&
n != sentinel_node &&
n->is_Type() &&
live_nodes.member(n)) {
@ -379,7 +379,7 @@ Node *NodeHash::find_index(uint idx) { // For debugging
if( !m || m == _sentinel ) continue;
if( m->_idx == (uint)idx ) return m;
}
return NULL;
return nullptr;
}
#endif
@ -567,7 +567,7 @@ int PhaseRenumberLive::update_embedded_ids(Node* n) {
}
const Type* type = _new_type_array.fast_lookup(n->_idx);
if (type != NULL && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) {
if (type != nullptr && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) {
if (!_is_pass_finished) {
return -1; // delay
}
@ -595,7 +595,7 @@ PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
set_allow_progress(true);
#endif
// Force allocation for currently existing nodes
_types.map(C->unique(), NULL);
_types.map(C->unique(), nullptr);
}
//------------------------------PhaseTransform---------------------------------
@ -611,7 +611,7 @@ PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
set_allow_progress(true);
#endif
// Force allocation for currently existing nodes
_types.map(C->unique(), NULL);
_types.map(C->unique(), nullptr);
}
//------------------------------PhaseTransform---------------------------------
@ -638,22 +638,22 @@ void PhaseTransform::init_con_caches() {
//--------------------------------find_int_type--------------------------------
const TypeInt* PhaseTransform::find_int_type(Node* n) {
if (n == NULL) return NULL;
if (n == nullptr) return nullptr;
// Call type_or_null(n) to determine the node's type, since we might be in
// the parse phase, where calling n->Value() may return the wrong type.
// (For example, a phi node at the beginning of loop parsing is not ready.)
const Type* t = type_or_null(n);
if (t == NULL) return NULL;
if (t == nullptr) return nullptr;
return t->isa_int();
}
//-------------------------------find_long_type--------------------------------
const TypeLong* PhaseTransform::find_long_type(Node* n) {
if (n == NULL) return NULL;
if (n == nullptr) return nullptr;
// (See comment above on type_or_null.)
const Type* t = type_or_null(n);
if (t == NULL) return NULL;
if (t == nullptr) return nullptr;
return t->isa_long();
}
@ -695,7 +695,7 @@ void PhaseTransform::dump_nodes_and_types_recur( const Node *n, uint depth, bool
dump_nodes_and_types_recur( n->in(i), depth-1, only_ctrl, visited );
}
n->dump();
if (type_or_null(n) != NULL) {
if (type_or_null(n) != nullptr) {
tty->print(" "); type(n)->dump(); tty->cr();
}
}
@ -758,10 +758,10 @@ ConNode* PhaseValues::uncached_makecon(const Type *t) {
assert(t->singleton(), "must be a constant");
ConNode* x = ConNode::make(t);
ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
if (k == NULL) {
if (k == nullptr) {
set_type(x, t); // Missed, provide type mapping
GrowableArray<Node_Notes*>* nna = C->node_note_array();
if (nna != NULL) {
if (nna != nullptr) {
Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
loc->clear(); // do not put debug info on constants
}
@ -778,7 +778,7 @@ ConINode* PhaseTransform::intcon(jint i) {
// Small integer? Check cache! Check that cached node is not dead
if (i >= _icon_min && i <= _icon_max) {
ConINode* icon = _icons[i-_icon_min];
if (icon != NULL && icon->in(TypeFunc::Control) != NULL)
if (icon != nullptr && icon->in(TypeFunc::Control) != nullptr)
return icon;
}
ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
@ -794,7 +794,7 @@ ConLNode* PhaseTransform::longcon(jlong l) {
// Small integer? Check cache! Check that cached node is not dead
if (l >= _lcon_min && l <= _lcon_max) {
ConLNode* lcon = _lcons[l-_lcon_min];
if (lcon != NULL && lcon->in(TypeFunc::Control) != NULL)
if (lcon != nullptr && lcon->in(TypeFunc::Control) != nullptr)
return lcon;
}
ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
@ -817,7 +817,7 @@ ConNode* PhaseTransform::integercon(jlong l, BasicType bt) {
ConNode* PhaseTransform::zerocon(BasicType bt) {
assert((uint)bt <= _zcon_max, "domain check");
ConNode* zcon = _zcons[bt];
if (zcon != NULL && zcon->in(TypeFunc::Control) != NULL)
if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
return zcon;
zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
_zcons[bt] = zcon;
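
intcon(), longcon() and zerocon() above share one pattern: a small fixed array caches the most common constants, and an entry is only trusted if the cached node is still alive (its control input is intact); otherwise the constant is rebuilt through uncached_makecon(). A toy sketch of that bounded, validity-checked cache (the range bounds and types here are hypothetical, not the HotSpot ones):

#include <array>

struct Con { bool alive; int value; };

constexpr int kMin = -1, kMax = 64;         // example cached range
std::array<Con*, kMax - kMin + 1> cache{};  // value-indexed, starts all null

Con* intcon(int i) {
  if (i >= kMin && i <= kMax) {
    Con* c = cache[i - kMin];
    if (c != nullptr && c->alive) return c; // hit, and entry still valid
  }
  Con* c = new Con{true, i};                // miss or dead entry: rebuild
  if (i >= kMin && i <= kMax) cache[i - kMin] = c;
  return c;
}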
@ -829,7 +829,7 @@ ConNode* PhaseTransform::zerocon(BasicType bt) {
//=============================================================================
Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
if (i == NULL) {
if (i == nullptr) {
i = k->Ideal(this, can_reshape);
}
return i;
@ -852,7 +852,7 @@ Node *PhaseGVN::transform_no_reclaim(Node *n) {
Node* k = n;
Node* i = apply_ideal(k, /*can_reshape=*/false);
NOT_PRODUCT(uint loop_count = 1;)
while (i != NULL) {
while (i != nullptr) {
assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
k = i;
#ifdef ASSERT
@ -873,11 +873,11 @@ Node *PhaseGVN::transform_no_reclaim(Node *n) {
// cache Value. Later requests for the local phase->type of this Node can
// use the cached Value instead of suffering with 'bottom_type'.
const Type* t = k->Value(this); // Get runtime Value set
assert(t != NULL, "value sanity");
assert(t != nullptr, "value sanity");
if (type_or_null(k) != t) {
#ifndef PRODUCT
// Do not count initial visit to node as a transformation
if (type_or_null(k) == NULL) {
if (type_or_null(k) == nullptr) {
inc_new_values();
set_progress();
}
@ -923,7 +923,7 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
while (d != n) {
n = IfNode::up_one_dom(n, linear_only);
i++;
if (n == NULL || i >= 100) {
if (n == nullptr || i >= 100) {
return false;
}
}
@ -936,7 +936,7 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
// or through an other data node excluding cons and phis.
void PhaseGVN::dead_loop_check( Node *n ) {
// Phi may reference itself in a loop
if (n != NULL && !n->is_dead_loop_safe() && !n->is_CFG()) {
if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) {
// Do 2 levels check and only data inputs.
bool no_dead_loop = true;
uint cnt = n->req();
@ -944,7 +944,7 @@ void PhaseGVN::dead_loop_check( Node *n ) {
Node *in = n->in(i);
if (in == n) {
no_dead_loop = false;
} else if (in != NULL && !in->is_dead_loop_safe()) {
} else if (in != nullptr && !in->is_dead_loop_safe()) {
uint icnt = in->req();
for (uint j = 1; j < icnt && no_dead_loop; j++) {
if (in->in(j) == n || in->in(j) == in)
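
dead_loop_check() above only looks two levels deep: a data node is flagged if one of its inputs is the node itself, or if an input's own inputs loop back. A toy sketch of that bounded check (toy node type, not the HotSpot Node):

#include <vector>

struct N { std::vector<N*> in; };

static bool has_short_dead_loop(const N* n) {
  for (const N* a : n->in) {
    if (a == n) return true;                 // direct self-reference
    if (a == nullptr) continue;
    for (const N* b : a->in) {
      if (b == n || b == a) return true;     // cycle one level deeper
    }
  }
  return false;                              // deeper cycles are not checked
}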
@ -999,7 +999,7 @@ PhaseIterGVN::PhaseIterGVN(PhaseGVN* gvn) : PhaseGVN(gvn),
max = _table.size();
for( uint i = 0; i < max; ++i ) {
Node *n = _table.at(i);
if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
if(n != nullptr && n != _table.sentinel() && n->outcnt() == 0) {
if( n->is_top() ) continue;
// If remove_useless_nodes() has run, we expect no such nodes left.
assert(false, "remove_useless_nodes missed this node");
@ -1046,7 +1046,7 @@ void PhaseIterGVN::verify_step(Node* n) {
}
for (int i = 0; i < _verify_window_size; i++) {
Node* n = _verify_window[i];
if (n == NULL) {
if (n == nullptr) {
continue;
}
if (n->in(0) == NodeSentinel) { // xform_idom
@ -1070,7 +1070,7 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
if (nn != n) {
// print old node
tty->print("< ");
if (oldtype != newtype && oldtype != NULL) {
if (oldtype != newtype && oldtype != nullptr) {
oldtype->dump();
}
do { tty->print("\t"); } while (tty->position() < 16);
@ -1079,14 +1079,14 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
}
if (oldtype != newtype || nn != n) {
// print new node and/or new type
if (oldtype == NULL) {
if (oldtype == nullptr) {
tty->print("* ");
} else if (nn != n) {
tty->print("> ");
} else {
tty->print("= ");
}
if (newtype == NULL) {
if (newtype == nullptr) {
tty->print("null");
} else {
newtype->dump();
@ -1104,7 +1104,7 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
}
if (nn != n) {
// ignore n, it might be subsumed
verify_step((Node*) NULL);
verify_step((Node*) nullptr);
}
}
}
@ -1113,12 +1113,12 @@ void PhaseIterGVN::init_verifyPhaseIterGVN() {
_verify_counter = 0;
_verify_full_passes = 0;
for (int i = 0; i < _verify_window_size; i++) {
_verify_window[i] = NULL;
_verify_window[i] = nullptr;
}
#ifdef ASSERT
// Verify that all modified nodes are on _worklist
Unique_Node_List* modified_list = C->modified_nodes();
while (modified_list != NULL && modified_list->size()) {
while (modified_list != nullptr && modified_list->size()) {
Node* n = modified_list->pop();
if (!n->is_Con() && !_worklist.member(n)) {
n->dump();
@ -1132,7 +1132,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
#ifdef ASSERT
// Verify nodes with changed inputs.
Unique_Node_List* modified_list = C->modified_nodes();
while (modified_list != NULL && modified_list->size()) {
while (modified_list != nullptr && modified_list->size()) {
Node* n = modified_list->pop();
if (!n->is_Con()) { // skip Con nodes
n->dump();
@ -1153,7 +1153,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
}
#ifdef ASSERT
if (modified_list != NULL) {
if (modified_list != nullptr) {
while (modified_list->size() > 0) {
Node* n = modified_list->pop();
n->dump();
@ -1260,7 +1260,7 @@ void PhaseIterGVN::verify_optimize() {
// (2) LoadNode performs deep traversals. Load is not notified for changes far away.
// (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
bool PhaseIterGVN::verify_node_value(Node* n) {
// If we assert inside type(n), because the type is still a nullptr, then maybe
// If we assert inside type(n), because the type is still a null, then maybe
// the node never went through gvn.transform, which would be a bug.
const Type* told = type(n);
const Type* tnew = n->Value(this);
@ -1324,7 +1324,7 @@ bool PhaseIterGVN::verify_node_value(Node* n) {
Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
set_type_bottom(n);
_worklist.push(n);
if (orig != NULL) C->copy_node_notes_to(n, orig);
if (orig != nullptr) C->copy_node_notes_to(n, orig);
return n;
}
@ -1339,7 +1339,7 @@ Node *PhaseIterGVN::transform( Node *n ) {
// If brand new node, make space in type array, and give it a type.
ensure_type_or_null(n);
if (type_or_null(n) == NULL) {
if (type_or_null(n) == nullptr) {
set_type_bottom(n);
}
@ -1373,7 +1373,7 @@ Node *PhaseIterGVN::transform_old(Node* n) {
#endif
DEBUG_ONLY(uint loop_count = 1;)
while (i != NULL) {
while (i != nullptr) {
#ifdef ASSERT
if (loop_count >= K + C->live_nodes()) {
dump_infinite_loop_info(i, "PhaseIterGVN::transform_old");
@ -1405,7 +1405,7 @@ Node *PhaseIterGVN::transform_old(Node* n) {
// See what kind of values 'k' takes on at runtime
const Type* t = k->Value(this);
assert(t != NULL, "value sanity");
assert(t != nullptr, "value sanity");
// Since I just called 'Value' to compute the set of run-time values
// for this Node, and 'Value' is non-local (and therefore expensive) I'll
@ -1490,8 +1490,8 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
// Smash all inputs to 'dead', isolating him completely
for (uint i = 0; i < dead->req(); i++) {
Node *in = dead->in(i);
if (in != NULL && in != C->top()) { // Points to something?
int nrep = dead->replace_edge(in, NULL, this); // Kill edges
if (in != nullptr && in != C->top()) { // Points to something?
int nrep = dead->replace_edge(in, nullptr, this); // Kill edges
assert((nrep > 0), "sanity");
if (in->outcnt() == 0) { // Made input go dead?
_stack.push(in, PROCESS_INPUTS); // Recursively remove
@ -1516,7 +1516,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
}
if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
// A Load that directly follows an InitializeNode is
// going away. The Stores that follow are candidates
// again to be captured by the InitializeNode.
@ -1527,7 +1527,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
}
}
}
} // if (in != NULL && in != C->top())
} // if (in != nullptr && in != C->top())
} // for (uint i = 0; i < dead->req(); i++)
if (recurse) {
continue;
@ -1583,11 +1583,11 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
// Search for instance field data PhiNodes in the same region pointing to the old
// memory PhiNode and update their instance memory ids to point to the new node.
if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != NULL) {
if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != nullptr) {
Node* region = old->in(0);
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
PhiNode* phi = region->fast_out(i)->isa_Phi();
if (phi != NULL && phi->inst_mem_id() == (int)old->_idx) {
if (phi != nullptr && phi->inst_mem_id() == (int)old->_idx) {
phi->set_inst_mem_id((int)nn->_idx);
}
}
@ -1598,7 +1598,7 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
temp->init_req(0,nn); // Add a use to nn to prevent him from dying
remove_dead_node( old );
temp->del_req(0); // Yank bogus edge
if (nn != NULL && nn->outcnt() == 0) {
if (nn != nullptr && nn->outcnt() == 0) {
_worklist.push(nn);
}
#ifndef PRODUCT
@ -1630,14 +1630,14 @@ static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
if (cle->limit() == n) {
PhiNode* phi = cle->phi();
if (phi != NULL) {
if (phi != nullptr) {
return phi;
}
}
}
}
}
return NULL;
return nullptr;
}
void PhaseIterGVN::add_users_to_worklist( Node *n ) {
@ -1652,12 +1652,12 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
add_users_to_worklist0(use);
// If we changed the receiver type to a call, we need to revisit
// the Catch following the call. It's looking for a non-NULL
// the Catch following the call. It's looking for a non-null
// receiver to know when to enable the regular fall-through path
// in addition to the NullPtrException path.
if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
if (p != NULL) {
if (p != nullptr) {
add_users_to_worklist0(p);
}
}
@ -1685,7 +1685,7 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
}
if (use_op == Op_CmpI || use_op == Op_CmpL) {
Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n);
if (phi != NULL) {
if (phi != nullptr) {
// Input to the cmp of a loop exit check has changed, thus
// the loop limit may have changed, which can then change the
// range values of the trip-count Phi.
@ -1823,9 +1823,9 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
// If changed initialization activity, check dependent Stores
if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
InitializeNode* init = use->as_Allocate()->initialization();
if (init != NULL) {
if (init != nullptr) {
Node* imem = init->proj_out_or_null(TypeFunc::Memory);
if (imem != NULL) add_users_to_worklist0(imem);
if (imem != nullptr) add_users_to_worklist0(imem);
}
}
// If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
@ -1833,14 +1833,14 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
// to guarantee the change is not missed.
if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
if (p != NULL) {
if (p != nullptr) {
add_users_to_worklist0(p);
}
}
if (use_op == Op_Initialize) {
Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
if (imem != NULL) add_users_to_worklist0(imem);
if (imem != nullptr) add_users_to_worklist0(imem);
}
// Loading the java mirror from a Klass requires two loads and the type
// of the mirror load depends on the type of 'n'. See LoadNode::Value().
@ -1883,7 +1883,7 @@ void PhaseIterGVN::remove_speculative_types() {
assert(UseTypeSpeculation, "speculation is off");
for (uint i = 0; i < _types.Size(); i++) {
const Type* t = _types.fast_lookup(i);
if (t != NULL) {
if (t != nullptr) {
_types.map(i, t->remove_speculative());
}
}
@ -2073,7 +2073,7 @@ void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
}
// If we changed the receiver type to a call, we need to revisit the Catch node following the call. It's looking for a
// non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
// non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
// Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
if (use->is_Call()) {
@ -2081,7 +2081,7 @@ void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
Node* proj = use->fast_out(i);
if (proj->is_Proj() && proj->as_Proj()->_con == TypeFunc::Control) {
Node* catch_node = proj->find_out_with(Op_Catch);
if (catch_node != NULL) {
if (catch_node != nullptr) {
worklist.push(catch_node);
}
}
@ -2111,7 +2111,7 @@ void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, c
uint use_op = use->Opcode();
if (use_op == Op_CmpI || use_op == Op_CmpL) {
PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
if (phi != NULL) {
if (phi != nullptr) {
worklist.push(phi);
}
}
@ -2200,7 +2200,7 @@ void PhaseCCP::do_transform() {
// Convert any of his old-space children into new-space children.
Node *PhaseCCP::transform( Node *n ) {
Node *new_node = _nodes[n->_idx]; // Check for transformed node
if( new_node != NULL )
if( new_node != nullptr )
return new_node; // Been there, done that, return old answer
assert(n->is_Root(), "traversal must start at root");
@ -2221,7 +2221,7 @@ Node *PhaseCCP::transform( Node *n ) {
for (uint i = 0; i < _root_and_safepoints.size(); ++i) {
Node* nn = _root_and_safepoints.at(i);
Node* new_node = _nodes[nn->_idx];
assert(new_node == NULL, "");
assert(new_node == nullptr, "");
new_node = transform_once(nn); // Check for constant
_nodes.map(nn->_idx, new_node); // Flag as having been cloned
transform_stack.push(new_node); // Process children of cloned node
@ -2233,9 +2233,9 @@ Node *PhaseCCP::transform( Node *n ) {
uint cnt = clone->req();
for( uint i = 0; i < cnt; i++ ) { // For all inputs do
Node *input = clone->in(i);
if( input != NULL ) { // Ignore NULLs
if( input != nullptr ) { // Ignore nulls
Node *new_input = _nodes[input->_idx]; // Check for cloned input node
if( new_input == NULL ) {
if( new_input == nullptr ) {
new_input = transform_once(input); // Check for constant
_nodes.map( input->_idx, new_input );// Flag as having been cloned
transform_stack.push(new_input); // Process children of cloned node
@ -2277,7 +2277,7 @@ Node *PhaseCCP::transform_once( Node *n ) {
Node *nn = n; // Default is to return the original constant
if( t == Type::TOP ) {
// cache my top node on the Compile instance
if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
if( C->cached_top_node() == nullptr || C->cached_top_node()->in(0) == nullptr ) {
C->set_cached_top_node(ConNode::make(Type::TOP));
set_type(C->top(), Type::TOP);
}
@ -2289,7 +2289,7 @@ Node *PhaseCCP::transform_once( Node *n ) {
NOT_PRODUCT( inc_constants(); )
} else if( n->is_Region() ) { // Unreachable region
// Note: nn == C->top()
n->set_req(0, NULL); // Cut selfreference
n->set_req(0, nullptr); // Cut selfreference
bool progress = true;
uint max = n->outcnt();
DUIterator i;
@ -2322,7 +2322,7 @@ Node *PhaseCCP::transform_once( Node *n ) {
_worklist.push(n); // n re-enters the hash table via the worklist
}
// TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
// TEMPORARY fix to ensure that 2nd GVN pass eliminates null checks
switch( n->Opcode() ) {
case Op_CallStaticJava: // Give post-parse call devirtualization a chance
case Op_CallDynamicJava:
@ -2383,7 +2383,7 @@ PhasePeephole::~PhasePeephole() {
//------------------------------transform--------------------------------------
Node *PhasePeephole::transform( Node *n ) {
ShouldNotCallThis();
return NULL;
return nullptr;
}
//------------------------------do_transform-----------------------------------
@ -2485,7 +2485,7 @@ void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
PhaseIterGVN* igvn = gvn->is_IterGVN();
if (igvn == NULL) {
if (igvn == nullptr) {
set_req(i, n);
return;
}
@ -2518,7 +2518,7 @@ void Type_Array::grow( uint i ) {
if( !_max ) {
_max = 1;
_types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
_types[0] = NULL;
_types[0] = nullptr;
}
uint old = _max;
_max = next_power_of_2(i);
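
Type_Array::grow() above rounds the capacity up to the next power of two, reallocates, and null-fills the tail so that unmapped indices keep reading back as null (matching operator[] in the header diff further down). A minimal sketch of that doubling-array growth, with simplified types and the usual all-bits-zero-is-null assumption:

#include <cstdlib>
#include <cstring>

static unsigned next_pow2(unsigned i) {
  unsigned p = 1;
  while (p <= i) p <<= 1;      // smallest power of two strictly above i
  return p;
}

struct ToyTypeArray {
  unsigned max = 0;
  const void** types = nullptr;

  void grow(unsigned i) {
    unsigned old = max;
    max = next_pow2(i);
    types = (const void**)std::realloc(types, max * sizeof(void*));
    std::memset(&types[old], 0, (max - old) * sizeof(void*)); // tail -> null
  }
};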
@ -2531,7 +2531,7 @@ void Type_Array::grow( uint i ) {
void Type_Array::dump() const {
uint max = Size();
for( uint i = 0; i < max; i++ ) {
if( _types[i] != NULL ) {
if( _types[i] != nullptr ) {
tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr();
}
}


@ -48,7 +48,7 @@ class PhaseRegAlloc;
//-----------------------------------------------------------------------------
// Expandable closed hash-table of nodes, initialized to NULL.
// Expandable closed hash-table of nodes, initialized to null.
// Note that the constructor just zeros things
// Storage is reclaimed when the Arena's lifetime is over.
class NodeHash : public StackObj {
@ -83,7 +83,7 @@ public:
// Return 75% of _max, rounded up.
uint insert_limit() const { return _max - (_max>>2); }
void clear(); // Set all entries to NULL, keep storage.
void clear(); // Set all entries to null, keep storage.
// Size of hash table
uint size() const { return _max; }
// Return Node* at index in table
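
A quick arithmetic check of insert_limit() above: _max - (_max >> 2) removes a quarter, leaving 75% of _max, rounded up when _max is not a multiple of four:

#include <cassert>

int main() {
  assert(32 - (32 >> 2) == 24);  // exactly 75% of 32
  assert(64 - (64 >> 2) == 48);  // the table grows once 48 of 64 slots fill
  assert(10 - (10 >> 2) == 8);   // 7.5 rounded up to 8
  return 0;
}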
@ -117,7 +117,7 @@ public:
//-----------------------------------------------------------------------------
// Map dense integer indices to Types. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Type*'s, initialized to NULL.
// Abstractly provides an infinite array of Type*'s, initialized to null.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
// Despite the general name, this class is customized for use by PhaseTransform.
@ -126,8 +126,8 @@ class Type_Array : public StackObj {
uint _max;
const Type **_types;
void grow( uint i ); // Grow array node to fit
const Type *operator[] ( uint i ) const // Lookup, or NULL for not mapped
{ return (i<_max) ? _types[i] : (Type*)NULL; }
const Type *operator[] ( uint i ) const // Lookup, or null for not mapped
{ return (i<_max) ? _types[i] : (Type*)nullptr; }
friend class PhaseTransform;
public:
Type_Array(Arena *a) : _a(a), _max(0), _types(0) {}
@ -220,28 +220,28 @@ public:
// Get a previously recorded type for the node n.
// This type must already have been recorded.
// If you want the type of a very new (untransformed) node,
// you must use type_or_null, and test the result for NULL.
// you must use type_or_null, and test the result for null.
const Type* type(const Node* n) const {
assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop");
assert(n != NULL, "must not be null");
assert(n != nullptr, "must not be null");
const Type* t = _types.fast_lookup(n->_idx);
assert(t != NULL, "must set before get");
assert(t != nullptr, "must set before get");
return t;
}
// Get a previously recorded type for the node n,
// or else return NULL if there is none.
// or else return null if there is none.
const Type* type_or_null(const Node* n) const {
assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop");
return _types.fast_lookup(n->_idx);
}
// Record a type for a node.
void set_type(const Node* n, const Type *t) {
assert(t != NULL, "type must not be null");
assert(t != nullptr, "type must not be null");
_types.map(n->_idx, t);
}
void clear_type(const Node* n) {
if (n->_idx < _types.Size()) {
_types.map(n->_idx, NULL);
_types.map(n->_idx, nullptr);
}
}
// Record an initial type for a node, the node's bottom type.
@ -249,14 +249,14 @@ public:
// Use this for initialization when bottom_type() (or better) is not handy.
// Usually the initialization should be to n->Value(this) instead,
// or a hand-optimized value like Type::MEMORY or Type::CONTROL.
assert(_types[n->_idx] == NULL, "must set the initial type just once");
assert(_types[n->_idx] == nullptr, "must set the initial type just once");
_types.map(n->_idx, n->bottom_type());
}
// Make sure the types array is big enough to record a size for the node n.
// (In product builds, we never want to do range checks on the types array!)
void ensure_type_or_null(const Node* n) {
if (n->_idx >= _types.Size())
_types.map(n->_idx, NULL); // Grow the types array as needed.
_types.map(n->_idx, nullptr); // Grow the types array as needed.
}
// Utility functions:
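
The hunk above spells out the lookup contract: type() asserts a type was already recorded, while type_or_null() is the safe probe for brand-new, untransformed nodes. A toy sketch of that pair of accessors (stand-in types, not the HotSpot classes):

#include <cassert>
#include <unordered_map>

struct Node { unsigned idx; };

struct TypeMap {
  std::unordered_map<unsigned, int> recorded;  // idx -> "type"

  int type(const Node* n) const {              // must set before get
    auto it = recorded.find(n->idx);
    assert(it != recorded.end() && "must set before get");
    return it->second;
  }
  const int* type_or_null(const Node* n) const {  // ok for brand-new nodes
    auto it = recorded.find(n->idx);
    return it == recorded.end() ? nullptr : &it->second;
  }
};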
@ -264,18 +264,18 @@ public:
const TypeLong* find_long_type(Node* n);
jint find_int_con( Node* n, jint value_if_unknown) {
const TypeInt* t = find_int_type(n);
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
}
jlong find_long_con(Node* n, jlong value_if_unknown) {
const TypeLong* t = find_long_type(n);
return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown;
}
// Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc.
// Same as transform(ConNode::make(t)).
ConNode* makecon(const Type* t);
virtual ConNode* uncached_makecon(const Type* t) // override in PhaseValues
{ ShouldNotCallThis(); return NULL; }
{ ShouldNotCallThis(); return nullptr; }
// Fast int or long constant. Same as TypeInt::make(i) or TypeLong::make(l).
ConINode* intcon(jint i);
@ -339,7 +339,7 @@ public:
// Caller guarantees that old_type and new_type are no higher than limit_type.
virtual const Type* saturate(const Type* new_type, const Type* old_type,
const Type* limit_type) const
{ ShouldNotCallThis(); return NULL; }
{ ShouldNotCallThis(); return nullptr; }
virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
return saturate(new_type, type_or_null(n), n->type());
}
@ -380,7 +380,7 @@ public:
PhaseValues(Arena* arena, uint est_max_size);
PhaseValues(PhaseValues* pt);
NOT_PRODUCT(~PhaseValues();)
PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : NULL; }
PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : nullptr; }
// Some Ideal and other transforms delete --> modify --> insert values
bool hash_delete(Node* n) { return _table.hash_delete(n); }
@ -506,7 +506,7 @@ public:
// transforms can be triggered on the region.
// Optional 'orig' is an earlier version of this node.
// It is significant only for debugging and profiling.
Node* register_new_node_with_optimizer(Node* n, Node* orig = NULL);
Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr);
// Kill a globally dead Node. All uses are also globally dead and are
// aggressively trimmed.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,7 +131,7 @@ class PhaseNameIter {
const char* operator*() const { return _token; }
PhaseNameIter& operator++() {
_token = strtok_r(NULL, ",", &_saved_ptr);
_token = strtok_r(nullptr, ",", &_saved_ptr);
return *this;
}
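
PhaseNameIter above wraps the standard strtok_r idiom: the first call receives the writable string, and every later call passes nullptr so scanning resumes from the position kept in the caller-supplied save pointer. A minimal standalone sketch with illustrative sample names (strtok_r is POSIX; on Windows the equivalent is strtok_s):

#include <cstdio>
#include <string.h>

int main() {
  char option[] = "escape_analysis,incremental_inline,all";  // must be writable
  char* saved_ptr = nullptr;
  for (char* token = strtok_r(option, ",", &saved_ptr);
       token != nullptr;
       token = strtok_r(nullptr, ",", &saved_ptr)) {
    std::printf("phase name: %s\n", token);
  }
  return 0;
}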
@ -159,13 +159,13 @@ class PhaseNameValidator {
public:
PhaseNameValidator(ccstrlist option, uint64_t& mask) : _valid(true), _bad(nullptr) {
for (PhaseNameIter iter(option); *iter != NULL && _valid; ++iter) {
for (PhaseNameIter iter(option); *iter != nullptr && _valid; ++iter) {
CompilerPhaseType cpt = find_phase(*iter);
if (PHASE_NONE == cpt) {
const size_t len = MIN2<size_t>(strlen(*iter), 63) + 1; // cap len to a value we know is enough for all phase descriptions
_bad = NEW_C_HEAP_ARRAY(char, len, mtCompiler);
// strncpy always writes len characters. If the source string is shorter, the function fills the remaining bytes with NULLs.
// strncpy always writes len characters. If the source string is shorter, the function fills the remaining bytes with nulls.
strncpy(_bad, *iter, len);
_valid = false;
} else if (PHASE_ALL == cpt) {
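
The comment above is worth illustrating, since this corner of strncpy is often misremembered: it always writes exactly len bytes, zero-filling when the source is shorter (and, conversely, not terminating when the source is longer). A quick check:

#include <cassert>
#include <cstring>

int main() {
  char buf[8];
  std::memset(buf, 'x', sizeof(buf));
  std::strncpy(buf, "ab", 5);    // writes 'a', 'b', then three '\0' bytes
  assert(buf[2] == '\0' && buf[4] == '\0');
  assert(buf[5] == 'x');         // bytes past len are left untouched
  return 0;
}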
@ -178,7 +178,7 @@ class PhaseNameValidator {
}
~PhaseNameValidator() {
if (_bad != NULL) {
if (_bad != nullptr) {
FREE_C_HEAP_ARRAY(char, _bad);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {
def = def->in(1);
else
break;
guarantee(def != NULL, "must not resurrect dead copy");
guarantee(def != nullptr, "must not resurrect dead copy");
}
// If we reached the end and didn't find a callee save proj
// then this may be a callee save proj so we return true
@ -87,10 +87,10 @@ int PhaseChaitin::yank(Node *old, Block *current_block, Node_List *value, Node_L
}
_cfg.unmap_node_from_block(old);
OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
assert(value != NULL || regnd == NULL, "sanity");
if (value != NULL && regnd != NULL && regnd->at(old_reg) == old) { // Instruction is currently available?
value->map(old_reg, NULL); // Yank from value/regnd maps
regnd->map(old_reg, NULL); // This register's value is now unknown
assert(value != nullptr || regnd == nullptr, "sanity");
if (value != nullptr && regnd != nullptr && regnd->at(old_reg) == old) { // Instruction is currently available?
value->map(old_reg, nullptr); // Yank from value/regnd maps
regnd->map(old_reg, nullptr); // This register's value is now unknown
}
return blk_adjust;
}
@ -147,8 +147,8 @@ int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current
for (uint i = 1; i < old->req(); i++) {
Node* n = old->in(i);
if (n != NULL) {
old->set_req(i, NULL);
if (n != nullptr) {
old->set_req(i, nullptr);
blk_adjust += yank_if_dead_recurse(n, orig_old, current_block, value, regnd);
}
}
@ -218,7 +218,7 @@ Node *PhaseChaitin::skip_copies( Node *c ) {
int idx = c->is_Copy();
uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop;
while (idx != 0) {
guarantee(c->in(idx) != NULL, "must not resurrect dead copy");
guarantee(c->in(idx) != nullptr, "must not resurrect dead copy");
if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) {
break; // casting copy, not the same value
}
@ -241,7 +241,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v
int idx;
while( (idx=x->is_Copy()) != 0 ) {
Node *copy = x->in(idx);
guarantee(copy != NULL, "must not resurrect dead copy");
guarantee(copy != nullptr, "must not resurrect dead copy");
if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) {
break;
}
@ -258,8 +258,8 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v
return blk_adjust; // Only check stupid copies!
}
// Loop backedges won't have a value-mapping yet
assert(regnd != NULL || value == NULL, "sanity");
if (value == NULL || regnd == NULL) {
assert(regnd != nullptr || value == nullptr, "sanity");
if (value == nullptr || regnd == nullptr) {
return blk_adjust;
}
@ -291,7 +291,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v
// register.
// Also handle duplicate copies here.
const Type *t = val->is_Con() ? val->bottom_type() : NULL;
const Type *t = val->is_Con() ? val->bottom_type() : nullptr;
// Scan all registers to see if this value is around already
for( uint reg = 0; reg < (uint)_max_reg; reg++ ) {
@ -360,7 +360,7 @@ bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n,
Node_List& value, Node_List& regnd,
OptoReg::Name nreg, OptoReg::Name nreg2) {
if (value[nreg] != val && val->is_Con() &&
value[nreg] != NULL && value[nreg]->is_Con() &&
value[nreg] != nullptr && value[nreg]->is_Con() &&
(nreg2 == OptoReg::Bad || value[nreg] == value[nreg2]) &&
value[nreg]->bottom_type() == val->bottom_type() &&
value[nreg]->as_Mach()->rule() == val->as_Mach()->rule()) {
@ -440,7 +440,7 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe
OptoReg::Name reg = lrgs(lrg).reg();
Node* def = reg2defuse.at(reg).def();
if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
if (def != nullptr && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
// Same lrg but different node, we have to merge.
MachMergeNode* merge;
if (def->is_MachMerge()) { // is it already a merge?
@ -464,7 +464,7 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe
if (use == n) {
break;
}
use->replace_edge(def, merge, NULL);
use->replace_edge(def, merge, nullptr);
}
}
if (merge->find_edge(n->in(k)) == -1) {
@ -484,10 +484,10 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe
//------------------------------post_allocate_copy_removal---------------------
// Post-Allocation peephole copy removal. We do this in 1 pass over the
// basic blocks. We maintain a mapping of registers to Nodes (an array of
// Nodes indexed by machine register or stack slot number). NULL means that a
// Nodes indexed by machine register or stack slot number). null means that a
// register is not mapped to any Node. We can (and indeed want to) have several
// registers map to the same Node. We walk forward over the instructions
// updating the mapping as we go. At merge points we force a NULL if we have
// updating the mapping as we go. At merge points we force a null if we have
// to merge 2 different Nodes into the same register. Phi functions will give
// us a new Node if there is a proper value merging. Since the blocks are
// arranged in some RPO, we will visit all parent blocks before visiting any
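
The merge rule described above (and implemented in the loop over predecessors further down) is: a register slot keeps its known Node only if every predecessor delivers the same one; any disagreement clears the slot. A toy sketch of one predecessor being merged in (stand-in types, not the HotSpot ones):

#include <cstddef>
#include <vector>

struct Node;  // opaque stand-in

// 'value' is the running reg -> Node map for this block; 'pred_value' is the
// map reaching us from one predecessor.
static void merge_pred(std::vector<Node*>& value,
                       const std::vector<Node*>& pred_value) {
  for (std::size_t reg = 0; reg < value.size(); reg++) {
    if (value[reg] != pred_value[reg]) {
      value[reg] = nullptr;  // conflicting reaching defs: no value handy
    }
  }
}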
@ -535,7 +535,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
// of registers at the start. Check for this, while updating copies
// along Phi input edges
bool missing_some_inputs = false;
Block *freed = NULL;
Block *freed = nullptr;
for (j = 1; j < block->num_preds(); j++) {
Block* pb = _cfg.get_block_for_node(block->pred(j));
// Remove copies along phi edges
@ -585,7 +585,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
value.copy(*blk2value[freed->_pre_order]);
regnd.copy(*blk2regnd[freed->_pre_order]);
}
// Merge all inputs together, setting to NULL any conflicts.
// Merge all inputs together, setting to null any conflicts.
for (j = 1; j < block->num_preds(); j++) {
Block* pb = _cfg.get_block_for_node(block->pred(j));
if (pb == freed) {
@ -594,8 +594,8 @@ void PhaseChaitin::post_allocate_copy_removal() {
Node_List &p_regnd = *blk2regnd[pb->_pre_order];
for (uint k = 0; k < (uint)_max_reg; k++) {
if (regnd[k] != p_regnd[k]) { // Conflict on reaching defs?
value.map(k, NULL); // Then no value handy
regnd.map(k, NULL);
value.map(k, nullptr); // Then no value handy
regnd.map(k, nullptr);
}
}
}
@ -609,7 +609,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
OptoReg::Name preg = lrgs(pidx).reg();
// Remove copies remaining on edges. Check for junk phi.
Node *u = NULL;
Node *u = nullptr;
for (k = 1; k < phi->req(); k++) {
Node *x = phi->in(k);
if( phi != x && u != x ) // Found a different input
@ -662,7 +662,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
uint k;
for (k = 1; k < n->req(); k++) {
Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE
guarantee(def != NULL, "no disconnected nodes at this point");
guarantee(def != nullptr, "no disconnected nodes at this point");
uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE
if( useidx ) {
@ -670,7 +670,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
if( !value[ureg] ) {
int idx; // Skip occasional useless copy
while( (idx=def->is_Copy()) != 0 &&
def->in(idx) != NULL && // NULL should not happen
def->in(idx) != nullptr && // null should not happen
ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg())
def = def->in(idx);
Node *valdef = skip_copies(def); // tighten up val through non-useless copies
@ -716,9 +716,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
// definition could in fact be a kill projection with a count of
// 0 which is safe but since those are uninteresting for copy
// elimination just delete them as well.
if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) {
regnd.map(nreg, NULL);
value.map(nreg, NULL);
if (regnd[nreg] != nullptr && regnd[nreg]->outcnt() == 0) {
regnd.map(nreg, nullptr);
value.map(nreg, nullptr);
}
uint n_ideal_reg = n->ideal_reg();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,10 +66,10 @@ Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type,
def->_idx, def->Name(), use->_idx, use->Name(), ireg,
MachSpillCopyNode::spill_type(spill_type));
C->record_method_not_compilable("attempted to spill a non-spillable item");
return NULL;
return nullptr;
}
if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
return NULL;
return nullptr;
}
const RegMask *i_mask = &def->out_RegMask();
const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
@ -164,7 +164,7 @@ uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **
assert( loc >= 0, "must insert past block head" );
// Get a def-side SpillCopy
Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, NULL, 0);
Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, nullptr, 0);
// Did we fail to split?, then bail
if (!spill) {
return 0;
@ -363,7 +363,7 @@ Node *PhaseChaitin::split_Rematerialize(Node *def, Block *b, uint insidx, uint &
}
Node *spill = clone_node(def, b, C);
if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
if (spill == nullptr || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
// Check when generating nodes
return 0;
}
@ -561,7 +561,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
bool *UPblock = UP[bidx];
for( slidx = 0; slidx < spill_cnt; slidx++ ) {
UPblock[slidx] = true; // Assume they start in registers
Reachblock[slidx] = NULL; // Assume that no def is present
Reachblock[slidx] = nullptr; // Assume that no def is present
}
}
@ -652,8 +652,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Move n2/u2 to n1/u1 for next iteration
n1 = n2;
u1 = u2;
// Preserve a non-NULL predecessor for later type referencing
if( (n3 == NULL) && (n2 != NULL) ){
// Preserve a non-null predecessor for later type referencing
if( (n3 == nullptr) && (n2 != nullptr) ){
n3 = n2;
u3 = u2;
}
@ -663,8 +663,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
n1 = b->get_node(insidx);
// bail if this is not a phi
phi = n1->is_Phi() ? n1->as_Phi() : NULL;
if( phi == NULL ) {
phi = n1->is_Phi() ? n1->as_Phi() : nullptr;
if( phi == nullptr ) {
// Keep track of index of first non-PhiNode instruction in block
non_phi = insidx;
// break out of the for loop as we have handled all phi nodes
@ -687,7 +687,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if( needs_phi ) {
// create a new phi node and insert it into the block
// type is taken from left over pointer to a predecessor
guarantee(n3, "No non-NULL reaching DEF for a Phi");
guarantee(n3, "No non-null reaching DEF for a Phi");
phi = new PhiNode(b->head(), n3->bottom_type());
// initialize the Reaches entry for this LRG
Reachblock[slidx] = phi;
@ -700,7 +700,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
} // end if not found correct phi
// Here you have either found or created the Phi, so record it
assert(phi != NULL,"Must have a Phi Node here");
assert(phi != nullptr,"Must have a Phi Node here");
phis->push(phi);
// PhiNodes should either force the LRG UP or DOWN depending
// on its inputs and the register pressure in the Phi's block.
@ -753,7 +753,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Memoize any DOWN reaching definitions for use as DEBUG info
for( insidx = 0; insidx < spill_cnt; insidx++ ) {
debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
debug_defs[insidx] = (UPblock[insidx]) ? nullptr : Reachblock[insidx];
if( UPblock[insidx] ) // Memoize UP decision at block start
UP_entry[insidx]->set( b->_pre_order );
}
@ -774,13 +774,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// ranges; they are busy getting modified in this pass.
if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
uint i;
Node *u = NULL;
Node *u = nullptr;
// Look for the Phi merging 2 unique inputs
for( i = 1; i < cnt; i++ ) {
// Ignore repeats and self
if( n->in(i) != u && n->in(i) != n ) {
// Found a unique input
if( u != NULL ) // If it's the 2nd, bail out
if( u != nullptr ) // If it's the 2nd, bail out
break;
u = n->in(i); // Else record it
}
@ -816,7 +816,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Check for need to split at HRP boundary - split if UP
n1 = Reachblock[slidx];
// bail out if no reaching DEF
if( n1 == NULL ) continue;
if( n1 == nullptr ) continue;
// bail out if live range is 'isolated' around inner loop
uint lidx = lidxs.at(slidx);
// If live range is currently UP
@ -826,7 +826,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
!n1->rematerialize() ) {
// If there is already a valid stack definition available, use it
if( debug_defs[slidx] != NULL ) {
if( debug_defs[slidx] != nullptr ) {
Reachblock[slidx] = debug_defs[slidx];
}
else {
@ -861,7 +861,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if (!maxlrg) {
return 0;
}
// Spill of NULL check mem op goes into the following block.
// Spill of null check mem op goes into the following block.
if (b->end_idx() > orig_eidx) {
insidx++;
}
@ -891,7 +891,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Remove coalesced copy from CFG
if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
n->replace_by( n->in(copyidx) );
n->set_req( copyidx, NULL );
n->set_req( copyidx, nullptr );
b->remove_node(insidx--);
b->_ihrp_index--; // Adjust the point where we go hi-pressure
b->_fhrp_index--;
@ -925,7 +925,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Check for valid reaching DEF
slidx = lrg2reach[useidx];
Node *def = Reachblock[slidx];
assert( def != NULL, "Using Undefined Value in Split()\n");
assert( def != nullptr, "Using Undefined Value in Split()\n");
// (+++) %%%% remove this in favor of pre-pass in matcher.cpp
// monitor references do not care where they live, so just hook
@ -934,7 +934,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// so that the allocator does not see it anymore, and therefore
// does not attempt to assign it a register.
def = clone_node(def, b, C);
if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
if (def == nullptr || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
return 0;
}
_lrg_map.extend(def->_idx, 0);
@ -952,7 +952,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
insidx += b->number_of_nodes()-old_size;
}
MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
MachNode *mach = n->is_Mach() ? n->as_Mach() : nullptr;
// Base pointers and oopmap references do not care where they live.
if ((inpidx >= oopoff) ||
(mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
@ -981,7 +981,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
Node *derived_debug = debug_defs[slidx];
if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
mach && mach->ideal_Opcode() != Op_Halt &&
derived_debug != NULL &&
derived_debug != nullptr &&
derived_debug != def ) { // Actual 2nd value appears
// We have already set 'def' as a derived value.
// Also set debug_defs[slidx] as a derived value.
@ -1009,7 +1009,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
uint debug_start = jvms->debug_start();
// If this is debug info use & there is a reaching DOWN def
if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
if ((debug_start <= inpidx) && (debug_defs[slidx] != nullptr)) {
assert(inpidx < oopoff, "handle only debug info here");
// Just hook it in & move on
n->set_req(inpidx, debug_defs[slidx]);
@ -1230,7 +1230,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// UP should come from the outRegmask() of the DEF
UPblock[slidx] = defup;
// Update debug list of reaching down definitions, kill if DEF is UP
debug_defs[slidx] = defup ? NULL : n;
debug_defs[slidx] = defup ? nullptr : n;
#ifndef PRODUCT
// DEBUG
if( trace_spilling() ) {
@ -1294,9 +1294,9 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
}
}
#endif
Reachblock[slidx] = NULL;
Reachblock[slidx] = nullptr;
} else {
assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
assert(Reachblock[slidx] != nullptr,"No reaching definition for liveout value");
}
}
#ifndef PRODUCT


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,13 +29,13 @@
#include "opto/replacednodes.hpp"
void ReplacedNodes::allocate_if_necessary() {
if (_replaced_nodes == NULL) {
if (_replaced_nodes == nullptr) {
_replaced_nodes = new GrowableArray<ReplacedNode>();
}
}
bool ReplacedNodes::is_empty() const {
return _replaced_nodes == NULL || _replaced_nodes->length() == 0;
return _replaced_nodes == nullptr || _replaced_nodes->length() == 0;
}
bool ReplacedNodes::has_node(const ReplacedNode& r) const {
@ -78,7 +78,7 @@ void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) {
}
void ReplacedNodes::clone() {
if (_replaced_nodes != NULL) {
if (_replaced_nodes != nullptr) {
GrowableArray<ReplacedNode>* replaced_nodes_clone = new GrowableArray<ReplacedNode>();
replaced_nodes_clone->appendAll(_replaced_nodes);
_replaced_nodes = replaced_nodes_clone;
@ -86,7 +86,7 @@ void ReplacedNodes::clone() {
}
void ReplacedNodes::reset() {
if (_replaced_nodes != NULL) {
if (_replaced_nodes != nullptr) {
_replaced_nodes->clear();
}
}
@ -130,7 +130,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) {
ReplacedNode replaced = _replaced_nodes->at(i);
Node* initial = replaced.initial();
Node* improved = replaced.improved();
assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control");
assert (ctl != nullptr && !ctl->is_top(), "replaced node should have actual control");
ResourceMark rm;
Unique_Node_List work;
@ -150,7 +150,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) {
if (use->outcnt() == 0) {
continue;
}
if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) {
if (n->is_CFG() || (n->in(0) != nullptr && !n->in(0)->is_top())) {
// Skip projections, since some of the multi nodes aren't CFG (e.g., LoadStore and SCMemProj).
if (n->is_Proj()) {
n = n->in(0);
@ -164,7 +164,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) {
n = IfNode::up_one_dom(n);
depth++;
// limit search depth
if (depth >= 100 || n == NULL) {
if (depth >= 100 || n == nullptr) {
replace = false;
break;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,7 @@ class ReplacedNodes {
Node* _initial;
Node* _improved;
public:
ReplacedNode() : _initial(NULL), _improved(NULL) {}
ReplacedNode() : _initial(nullptr), _improved(nullptr) {}
ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {}
Node* initial() const { return _initial; }
Node* improved() const { return _improved; }
@ -65,7 +65,7 @@ class ReplacedNodes {
public:
ReplacedNodes()
: _replaced_nodes(NULL) {}
: _replaced_nodes(nullptr) {}
void clone();
void record(Node* initial, Node* improved);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ Node *RootNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// If we want to get the rest of the win later, we should pattern match
// simple recursive call trees to closed-form solutions.
return modified ? this : NULL;
return modified ? this : nullptr;
}
//=============================================================================
@ -78,7 +78,7 @@ uint HaltNode::size_of() const { return sizeof(*this); }
//------------------------------Ideal------------------------------------------
Node *HaltNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
return remove_dead_region(phase, can_reshape) ? this : nullptr;
}
//------------------------------Value------------------------------------------


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,22 +93,22 @@
// Compiled code entry points
address OptoRuntime::_new_instance_Java = NULL;
address OptoRuntime::_new_array_Java = NULL;
address OptoRuntime::_new_array_nozero_Java = NULL;
address OptoRuntime::_multianewarray2_Java = NULL;
address OptoRuntime::_multianewarray3_Java = NULL;
address OptoRuntime::_multianewarray4_Java = NULL;
address OptoRuntime::_multianewarray5_Java = NULL;
address OptoRuntime::_multianewarrayN_Java = NULL;
address OptoRuntime::_vtable_must_compile_Java = NULL;
address OptoRuntime::_complete_monitor_locking_Java = NULL;
address OptoRuntime::_monitor_notify_Java = NULL;
address OptoRuntime::_monitor_notifyAll_Java = NULL;
address OptoRuntime::_rethrow_Java = NULL;
address OptoRuntime::_new_instance_Java = nullptr;
address OptoRuntime::_new_array_Java = nullptr;
address OptoRuntime::_new_array_nozero_Java = nullptr;
address OptoRuntime::_multianewarray2_Java = nullptr;
address OptoRuntime::_multianewarray3_Java = nullptr;
address OptoRuntime::_multianewarray4_Java = nullptr;
address OptoRuntime::_multianewarray5_Java = nullptr;
address OptoRuntime::_multianewarrayN_Java = nullptr;
address OptoRuntime::_vtable_must_compile_Java = nullptr;
address OptoRuntime::_complete_monitor_locking_Java = nullptr;
address OptoRuntime::_monitor_notify_Java = nullptr;
address OptoRuntime::_monitor_notifyAll_Java = nullptr;
address OptoRuntime::_rethrow_Java = nullptr;
address OptoRuntime::_slow_arraycopy_Java = NULL;
address OptoRuntime::_register_finalizer_Java = NULL;
address OptoRuntime::_slow_arraycopy_Java = nullptr;
address OptoRuntime::_register_finalizer_Java = nullptr;
ExceptionBlob* OptoRuntime::_exception_blob;
@ -130,7 +130,7 @@ static bool check_compiled_frame(JavaThread* thread) {
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
if (var == NULL) { return false; }
if (var == nullptr) { return false; }
bool OptoRuntime::generate(ciEnv* env) {
@ -181,7 +181,7 @@ const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
CodeBlob* cb = CodeCache::find_blob(entry);
RuntimeStub* rs =(RuntimeStub *)cb;
assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub");
assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub");
return rs->name();
#else
// Fast implementation for product mode (maybe it should be inlined too)
@ -303,7 +303,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len
SharedRuntime::on_slowpath_allocation_exit(current);
oop result = current->vm_result();
if ((len > 0) && (result != NULL) &&
if ((len > 0) && (result != nullptr) &&
is_deoptimized_caller_frame(current)) {
// Zero array here if the caller is deoptimized.
const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result);
@ -609,12 +609,12 @@ const TypeFunc *OptoRuntime::monitor_notify_Type() {
const TypeFunc* OptoRuntime::flush_windows_Type() {
// create input type (domain)
const Type** fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);
// create result type
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
@ -781,7 +781,7 @@ static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
fields = TypeTuple::fields(1);
if (retcnt == 0)
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
else
fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
@ -823,7 +823,7 @@ const TypeFunc* OptoRuntime::array_fill_Type() {
// create result type
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
@ -844,7 +844,7 @@ const TypeFunc* OptoRuntime::aescrypt_block_Type() {
// no result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1024,7 +1024,7 @@ const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) {
// no result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1070,7 +1070,7 @@ const TypeFunc* OptoRuntime::multiplyToLen_Type() {
// no result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL;
fields[TypeFunc::Parms+0] = nullptr;
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1090,7 +1090,7 @@ const TypeFunc* OptoRuntime::squareToLen_Type() {
// no result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL;
fields[TypeFunc::Parms+0] = nullptr;
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1178,7 +1178,7 @@ const TypeFunc * OptoRuntime::bigIntegerShift_Type() {
// no result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms + 0] = NULL;
fields[TypeFunc::Parms + 0] = nullptr;
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1218,7 +1218,7 @@ const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
// result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1259,7 +1259,7 @@ const TypeFunc* OptoRuntime::base64_encodeBlock_Type() {
// result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms + 0] = NULL; // void
fields[TypeFunc::Parms + 0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1301,7 +1301,7 @@ const TypeFunc* OptoRuntime::poly1305_processBlocks_Type() {
// result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms + 0] = NULL; // void
fields[TypeFunc::Parms + 0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1316,7 +1316,7 @@ const TypeFunc* OptoRuntime::osr_end_Type() {
// create result type
fields = TypeTuple::fields(1);
// fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
fields[TypeFunc::Parms+0] = NULL; // void
fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}
@ -1355,8 +1355,8 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c
// is only used to pass arguments into the method. Not for general
// exception handling. DO NOT CHANGE IT to use pending_exception, since
// the runtime stubs checks this on exit.
assert(current->exception_oop() != NULL, "exception oop is found");
address handler_address = NULL;
assert(current->exception_oop() != nullptr, "exception oop is found");
address handler_address = nullptr;
Handle exception(current, current->exception_oop());
address pc = current->exception_pc();
@ -1389,7 +1389,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c
// using rethrow node
nm = CodeCache::find_nmethod(pc);
assert(nm != NULL, "No NMethod found");
assert(nm != nullptr, "No NMethod found");
if (nm->is_native_method()) {
fatal("Native method should not have path to exception handling");
} else {
@ -1430,12 +1430,12 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c
} else {
handler_address =
force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc);
force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc);
if (handler_address == NULL) {
if (handler_address == nullptr) {
bool recursive_exception = false;
handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
assert (handler_address != NULL, "must have compiled handler");
assert (handler_address != nullptr, "must have compiled handler");
// Update the exception cache only when the unwind was not forced
// and there didn't happen another exception during the computation of the
// compiled exception handler. Checking for exception oop equality is not
@ -1481,8 +1481,8 @@ address OptoRuntime::handle_exception_C(JavaThread* current) {
SharedRuntime::_find_handler_ctr++; // find exception handler
#endif
debug_only(NoHandleMark __hm;)
nmethod* nm = NULL;
address handler_address = NULL;
nmethod* nm = nullptr;
address handler_address = nullptr;
{
// Enter the VM
@ -1495,7 +1495,7 @@ address OptoRuntime::handle_exception_C(JavaThread* current) {
// Now check to see if the handler we are returning is in a now
// deoptimized frame
if (nm != NULL) {
if (nm != nullptr) {
RegisterMap map(current,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::skip,
@ -1547,7 +1547,7 @@ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address r
#ifndef PRODUCT
SharedRuntime::_rethrow_ctr++; // count rethrows
#endif
assert (exception != NULL, "should have thrown a NULLPointerException");
assert (exception != nullptr, "should have thrown a NullPointerException");
#ifdef ASSERT
if (!(exception->is_a(vmClasses::Throwable_klass()))) {
// should throw an exception here
@ -1686,7 +1686,7 @@ JRT_END
//-----------------------------------------------------------------------------
NamedCounter * volatile OptoRuntime::_named_counters = NULL;
NamedCounter * volatile OptoRuntime::_named_counters = nullptr;
//
// dump the collected NamedCounters.
@ -1742,7 +1742,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
stringStream st;
for (int depth = max_depth; depth >= 1; depth--) {
JVMState* jvms = youngest_jvms->of_depth(depth);
ciMethod* m = jvms->has_method() ? jvms->method() : NULL;
ciMethod* m = jvms->has_method() ? jvms->method() : nullptr;
if (!first) {
st.print(" ");
} else {
@ -1750,7 +1750,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
}
int bci = jvms->bci();
if (bci < 0) bci = 0;
if (m != NULL) {
if (m != nullptr) {
st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
} else {
st.print("no method");
@ -1769,7 +1769,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
// add counters so this is safe.
NamedCounter* head;
do {
c->set_next(NULL);
c->set_next(nullptr);
head = _named_counters;
c->set_next(head);
} while (Atomic::cmpxchg(&_named_counters, head, c) != head);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,13 +73,13 @@ private:
public:
NamedCounter(const char *n, CounterTag tag = NoTag):
_name(n == NULL ? NULL : os::strdup(n)),
_name(n == nullptr ? nullptr : os::strdup(n)),
_count(0),
_tag(tag),
_next(NULL) {}
_next(nullptr) {}
~NamedCounter() {
if (_name != NULL) {
if (_name != nullptr) {
os::free((void*)_name);
}
}
@ -92,7 +92,7 @@ private:
NamedCounter* next() const { return _next; }
void set_next(NamedCounter* next) {
assert(_next == NULL || next == NULL, "already set");
assert(_next == nullptr || next == nullptr, "already set");
_next = next;
}

@ -102,7 +102,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
Node* m = wq.at(i);
if (m->is_If()) {
assert(skeleton_predicate_has_opaque(m->as_If()), "opaque node not reachable from if?");
Node* bol = clone_skeleton_predicate_bool(m, NULL, NULL, m->in(0));
Node* bol = clone_skeleton_predicate_bool(m, nullptr, nullptr, m->in(0));
_igvn.replace_input_of(m, 1, bol);
} else {
assert(!m->is_CFG(), "not CFG expected");
@ -164,7 +164,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
// ConvI2L may have type information on it which becomes invalid if
// it moves up in the graph so change any clones so widen the type
// to TypeLong::INT when pushing it up.
const Type* rtype = NULL;
const Type* rtype = nullptr;
if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
rtype = TypeLong::INT;
}
@ -174,7 +174,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
for( uint j = 1; j < blk1->req(); j++ ) {
Node *x = n->clone();
// Widen the type of the ConvI2L when pushing up.
if (rtype != NULL) x->as_Type()->set_type(rtype);
if (rtype != nullptr) x->as_Type()->set_type(rtype);
if( n->in(0) && n->in(0) == blk1 )
x->set_req( 0, blk1->in(j) );
for( uint i = 1; i < n->req(); i++ ) {
@ -550,7 +550,7 @@ Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Nod
set_ctrl(use, new_true);
}
if (use_blk == NULL) { // He's dead, Jim
if (use_blk == nullptr) { // He's dead, Jim
_igvn.replace_node(use, C->top());
}
@ -628,7 +628,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
for (j = n->outs(); n->has_out(j); j++) {
Node* m = n->out(j);
// If m is dead, throw it away, and declare progress
if (_nodes[m->_idx] == NULL) {
if (_nodes[m->_idx] == nullptr) {
_igvn.remove_dead_node(m);
// fall through
}
@ -652,9 +652,9 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
// Replace both uses of 'new_iff' with Regions merging True/False
// paths. This makes 'new_iff' go dead.
Node *old_false = NULL, *old_true = NULL;
RegionNode* new_false = NULL;
RegionNode* new_true = NULL;
Node *old_false = nullptr, *old_true = nullptr;
RegionNode* new_false = nullptr;
RegionNode* new_true = nullptr;
for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
Node *ifp = iff->last_out(j2);
assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
@ -690,7 +690,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
// Lazy replace IDOM info with the region's dominator
lazy_replace(iff, region_dom);
lazy_update(region, region_dom); // idom must be update before handle_uses
region->set_req(0, NULL); // Break the self-cycle. Required for lazy_update to work on region
region->set_req(0, nullptr); // Break the self-cycle. Required for lazy_update to work on region
// Now make the original merge point go dead, by handling all its uses.
small_cache region_cache;
@ -735,10 +735,10 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
_igvn.remove_dead_node(region);
if (new_false_region != NULL) {
if (new_false_region != nullptr) {
*new_false_region = new_false;
}
if (new_true_region != NULL) {
if (new_true_region != nullptr) {
*new_true_region = new_true;
}

@ -66,7 +66,7 @@ class StringConcat : public ResourceObj {
StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end):
_stringopts(stringopts),
_begin(NULL),
_begin(nullptr),
_end(end),
_multiple(false) {
_arguments = new Node(1);
@ -115,7 +115,7 @@ class StringConcat : public ResourceObj {
if (call->is_CallStaticJava()) {
CallStaticJavaNode* csj = call->as_CallStaticJava();
ciMethod* m = csj->method();
if (m != NULL &&
if (m != nullptr &&
(m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString ||
m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString)) {
return true;
@ -127,7 +127,7 @@ class StringConcat : public ResourceObj {
static Node* skip_string_null_check(Node* value) {
// Look for a diamond shaped Null check of toString() result
// (could be code from String.valueOf()):
// (Proj == NULL) ? "null":"CastPP(Proj)#NotNULL
// (Proj == nullptr) ? "null":"CastPP(Proj)#Notnull
if (value->is_Phi()) {
int true_path = value->as_Phi()->is_diamond_phi();
if (true_path != 0) {
@ -187,10 +187,10 @@ class StringConcat : public ResourceObj {
void maybe_log_transform() {
CompileLog* log = _stringopts->C->log();
if (log != NULL) {
if (log != nullptr) {
log->head("replace_string_concat arguments='%d' multiple='%d'", num_arguments(), _multiple);
JVMState* p = _begin->jvms();
while (p != NULL) {
while (p != nullptr) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
@ -205,7 +205,7 @@ class StringConcat : public ResourceObj {
// Build a new call using the jvms state of the allocate
address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type();
const TypePtr* no_memory_effects = NULL;
const TypePtr* no_memory_effects = nullptr;
Compile* C = _stringopts->C;
CallStaticJavaNode* call = new CallStaticJavaNode(call_type, call_addr, "uncommon_trap",
no_memory_effects);
@ -317,22 +317,22 @@ void StringConcat::eliminate_call(CallNode* call) {
Compile* C = _stringopts->C;
CallProjections projs;
call->extract_projections(&projs, false);
if (projs.fallthrough_catchproj != NULL) {
if (projs.fallthrough_catchproj != nullptr) {
C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control));
}
if (projs.fallthrough_memproj != NULL) {
if (projs.fallthrough_memproj != nullptr) {
C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory));
}
if (projs.catchall_memproj != NULL) {
if (projs.catchall_memproj != nullptr) {
C->gvn_replace_by(projs.catchall_memproj, C->top());
}
if (projs.fallthrough_ioproj != NULL) {
if (projs.fallthrough_ioproj != nullptr) {
C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O));
}
if (projs.catchall_ioproj != NULL) {
if (projs.catchall_ioproj != nullptr) {
C->gvn_replace_by(projs.catchall_ioproj, C->top());
}
if (projs.catchall_catchproj != NULL) {
if (projs.catchall_catchproj != nullptr) {
// EA can't cope with the partially collapsed graph this
// creates so put it on the worklist to be collapsed later.
for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) {
@ -344,7 +344,7 @@ void StringConcat::eliminate_call(CallNode* call) {
}
C->gvn_replace_by(projs.catchall_catchproj, C->top());
}
if (projs.resproj != NULL) {
if (projs.resproj != nullptr) {
C->gvn_replace_by(projs.resproj, C->top());
}
C->gvn_replace_by(call, C->top());
@ -357,11 +357,11 @@ void StringConcat::eliminate_initialize(InitializeNode* init) {
assert(init->outcnt() <= 2, "only a control and memory projection expected");
assert(init->req() <= InitializeNode::RawStores, "no pending inits");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != NULL) {
if (ctrl_proj != nullptr) {
C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
}
Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (mem_proj != NULL) {
if (mem_proj != nullptr) {
Node *mem = init->in(TypeFunc::Memory);
C->gvn_replace_by(mem_proj, mem);
}
@ -378,7 +378,7 @@ Node_List PhaseStringOpts::collect_toString_calls() {
// Prime the worklist
for (uint i = 1; i < C->root()->len(); i++) {
Node* n = C->root()->in(i);
if (n != NULL && !_visited.test_set(n->_idx)) {
if (n != nullptr && !_visited.test_set(n->_idx)) {
worklist.push(n);
}
}
@ -391,12 +391,12 @@ Node_List PhaseStringOpts::collect_toString_calls() {
string_calls.push(csj);
encountered++;
}
if (ctrl->in(0) != NULL && !_visited.test_set(ctrl->in(0)->_idx)) {
if (ctrl->in(0) != nullptr && !_visited.test_set(ctrl->in(0)->_idx)) {
worklist.push(ctrl->in(0));
}
if (ctrl->is_Region()) {
for (uint i = 1; i < ctrl->len(); i++) {
if (ctrl->in(i) != NULL && !_visited.test_set(ctrl->in(i)->_idx)) {
if (ctrl->in(i) != nullptr && !_visited.test_set(ctrl->in(i)->_idx)) {
worklist.push(ctrl->in(i));
}
}
@ -444,7 +444,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
int_sig = ciSymbols::int_StringBuffer_signature();
char_sig = ciSymbols::char_StringBuffer_signature();
} else {
return NULL;
return nullptr;
}
#ifndef PRODUCT
if (PrintOptimizeStringConcat) {
@ -454,7 +454,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
#endif
StringConcat* sc = new StringConcat(this, call);
AllocateNode* alloc = NULL;
AllocateNode* alloc = nullptr;
// possible opportunity for StringBuilder fusion
CallStaticJavaNode* cnode = call;
@ -464,14 +464,14 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
recv = recv->in(0);
}
cnode = recv->isa_CallStaticJava();
if (cnode == NULL) {
if (cnode == nullptr) {
alloc = recv->isa_Allocate();
if (alloc == NULL) {
if (alloc == nullptr) {
break;
}
// Find the constructor call
Node* result = alloc->result_cast();
if (result == NULL || !result->is_CheckCastPP() || alloc->in(TypeFunc::Memory)->is_top()) {
if (result == nullptr || !result->is_CheckCastPP() || alloc->in(TypeFunc::Memory)->is_top()) {
// strange looking allocation
#ifndef PRODUCT
if (PrintOptimizeStringConcat) {
@ -481,11 +481,11 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
#endif
break;
}
Node* constructor = NULL;
Node* constructor = nullptr;
for (SimpleDUIterator i(result); i.has_next(); i.next()) {
CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
if (use != NULL &&
use->method() != NULL &&
if (use != nullptr &&
use->method() != nullptr &&
!use->method()->is_static() &&
use->method()->name() == ciSymbols::object_initializer_name() &&
use->method()->holder() == m->holder()) {
@ -496,7 +496,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
sig == ciSymbols::string_void_signature()) {
if (sig == ciSymbols::string_void_signature()) {
// StringBuilder(String) so pick this up as the first argument
assert(use->in(TypeFunc::Parms + 1) != NULL, "what?");
assert(use->in(TypeFunc::Parms + 1) != nullptr, "what?");
const Type* type = _gvn->type(use->in(TypeFunc::Parms + 1));
if (type == TypePtr::NULL_PTR) {
// StringBuilder(null) throws exception.
@ -507,14 +507,14 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
tty->cr();
}
#endif
return NULL;
return nullptr;
}
// StringBuilder(str) argument needs null check.
sc->push_string_null_check(use->in(TypeFunc::Parms + 1));
} else if (sig == ciSymbols::int_void_signature()) {
// StringBuilder(int) case.
Node* parm = use->in(TypeFunc::Parms + 1);
assert(parm != NULL, "must exist");
assert(parm != nullptr, "must exist");
const TypeInt* type = _gvn->type(parm)->is_int();
if (type->_hi < 0) {
// Initial capacity argument is always negative in which case StringBuilder(int) throws
@ -527,7 +527,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
tty->cr();
}
#endif
return NULL;
return nullptr;
} else if (type->_lo < 0) {
// Argument could be negative: We need a runtime check to throw NegativeArraySizeException in that case.
sc->push_negative_int_check(parm);
@ -546,7 +546,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
break;
}
}
if (constructor == NULL) {
if (constructor == nullptr) {
// couldn't find constructor
#ifndef PRODUCT
if (PrintOptimizeStringConcat) {
@ -567,9 +567,9 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
if (sc->validate_control_flow() && sc->validate_mem_flow()) {
return sc;
} else {
return NULL;
return nullptr;
}
} else if (cnode->method() == NULL) {
} else if (cnode->method() == nullptr) {
break;
} else if (!cnode->method()->is_static() &&
cnode->method()->holder() == m->holder() &&
@ -579,7 +579,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
cnode->method()->signature()->as_symbol() == int_sig)) {
sc->add_control(cnode);
Node* arg = cnode->in(TypeFunc::Parms + 1);
if (arg == NULL || arg->is_top()) {
if (arg == nullptr || arg->is_top()) {
#ifndef PRODUCT
if (PrintOptimizeStringConcat) {
tty->print("giving up because the call is effectively dead");
@ -595,7 +595,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
} else {
if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
if (csj->method() != NULL &&
if (csj->method() != nullptr &&
csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString &&
arg->outcnt() == 1) {
// _control is the list of StringBuilder calls nodes which
@ -627,7 +627,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
break;
}
}
return NULL;
return nullptr;
}
@ -650,7 +650,7 @@ PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn):
Node_List toStrings = collect_toString_calls();
while (toStrings.size() > 0) {
StringConcat* sc = build_candidate(toStrings.pop()->as_CallStaticJava());
if (sc != NULL) {
if (sc != nullptr) {
concats.push(sc);
}
}
@ -778,7 +778,7 @@ bool StringConcat::validate_mem_flow() {
if (!_constructors.contains(curr)) {
NOT_PRODUCT(path.push(curr);)
Node* mem = curr->in(TypeFunc::Memory);
assert(mem != NULL, "calls should have memory edge");
assert(mem != nullptr, "calls should have memory edge");
assert(!mem->is_Phi(), "should be handled by control flow validation");
NOT_PRODUCT(path.push(mem);)
while (mem->is_MergeMem()) {
@ -835,8 +835,8 @@ bool StringConcat::validate_mem_flow() {
assert(curr->is_Call(), "constructor should be a call");
// Go up the control starting from the constructor call
Node* ctrl = curr->in(0);
IfNode* iff = NULL;
RegionNode* copy = NULL;
IfNode* iff = nullptr;
RegionNode* copy = nullptr;
while (true) {
// skip known check patterns
@ -846,10 +846,10 @@ bool StringConcat::validate_mem_flow() {
ctrl = copy->is_copy();
} else { // a cast
assert(ctrl->req() == 3 &&
ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
ctrl->in(1) != nullptr && ctrl->in(1)->is_Proj() &&
ctrl->in(2) != nullptr && ctrl->in(2)->is_Proj() &&
ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
ctrl->in(1)->in(0) != nullptr && ctrl->in(1)->in(0)->is_If(),
"must be a simple diamond");
Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
@ -933,7 +933,7 @@ bool StringConcat::validate_control_flow() {
ctrl_path.push(cn->proj_out(0));
ctrl_path.push(cn->proj_out(0)->unique_out());
Node* catchproj = cn->proj_out(0)->unique_out()->as_Catch()->proj_out_or_null(0);
if (catchproj != NULL) {
if (catchproj != nullptr) {
ctrl_path.push(catchproj);
}
} else {
@ -954,7 +954,7 @@ bool StringConcat::validate_control_flow() {
IfNode* iff = ptr->in(0)->as_If();
BoolNode* b = iff->in(1)->isa_Bool();
if (b == NULL) {
if (b == nullptr) {
#ifndef PRODUCT
if (PrintOptimizeStringConcat) {
tty->print_cr("unexpected input to IfNode");
@ -975,11 +975,11 @@ bool StringConcat::validate_control_flow() {
if (b->_test._test == BoolTest::ne &&
v2->bottom_type() == TypePtr::NULL_PTR &&
v1->is_Proj() && ctrl_path.member(v1->in(0))) {
// NULL check of the return value of the append
// null check of the return value of the append
null_check_count++;
if (otherproj->outcnt() == 1) {
CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
if (call != nullptr && call->_name != nullptr && strcmp(call->_name, "uncommon_trap") == 0) {
ctrl_path.push(call);
}
}
@ -993,7 +993,7 @@ bool StringConcat::validate_control_flow() {
// at the beginning.
if (otherproj->outcnt() == 1) {
CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
if (call != nullptr && call->_name != nullptr && strcmp(call->_name, "uncommon_trap") == 0) {
// control flow leads to uct so should be ok
_uncommon_traps.push(call);
ctrl_path.push(call);
@ -1034,15 +1034,15 @@ bool StringConcat::validate_control_flow() {
ptr = ptr->in(0)->in(0);
} else if (ptr->is_Region()) {
Node* copy = ptr->as_Region()->is_copy();
if (copy != NULL) {
if (copy != nullptr) {
ptr = copy;
continue;
}
if (ptr->req() == 3 &&
ptr->in(1) != NULL && ptr->in(1)->is_Proj() &&
ptr->in(2) != NULL && ptr->in(2)->is_Proj() &&
ptr->in(1) != nullptr && ptr->in(1)->is_Proj() &&
ptr->in(2) != nullptr && ptr->in(2)->is_Proj() &&
ptr->in(1)->in(0) == ptr->in(2)->in(0) &&
ptr->in(1)->in(0) != NULL && ptr->in(1)->in(0)->is_If()) {
ptr->in(1)->in(0) != nullptr && ptr->in(1)->in(0)->is_If()) {
// Simple diamond.
// XXX should check for possibly merging stores. simple data merges are ok.
// The IGVN will make this simple diamond go away when it
@ -1095,16 +1095,16 @@ bool StringConcat::validate_control_flow() {
Node* final_result = _end->proj_out_or_null(TypeFunc::Parms);
for (uint i = 0; i < _control.size(); i++) {
CallNode* cnode = _control.at(i)->isa_Call();
if (cnode != NULL) {
if (cnode != nullptr) {
_stringopts->_visited.test_set(cnode->_idx);
}
Node* result = cnode != NULL ? cnode->proj_out_or_null(TypeFunc::Parms) : NULL;
if (result != NULL && result != final_result) {
Node* result = cnode != nullptr ? cnode->proj_out_or_null(TypeFunc::Parms) : nullptr;
if (result != nullptr && result != final_result) {
worklist.push(result);
}
}
Node* last_result = NULL;
Node* last_result = nullptr;
while (worklist.size() > 0) {
Node* result = worklist.pop();
if (_stringopts->_visited.test_set(result->_idx))
@ -1450,7 +1450,7 @@ void PhaseStringOpts::arraycopy(GraphKit& kit, IdealKit& ideal, Node* src_array,
count = __ RShiftI(count, __ intcon(1));
}
Node* extra = NULL;
Node* extra = nullptr;
#ifdef _LP64
count = __ ConvI2L(count);
extra = C->top();
@ -1627,7 +1627,7 @@ Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* dst_array, No
// Compress copy the char into dst_array at index start.
Node* PhaseStringOpts::copy_char(GraphKit& kit, Node* val, Node* dst_array, Node* dst_coder, Node* start) {
bool dcon = (dst_coder != NULL) && dst_coder->is_Con();
bool dcon = (dst_coder != nullptr) && dst_coder->is_Con();
bool dbyte = dcon ? (dst_coder->get_int() == java_lang_String::CODER_LATIN1) : false;
IdealKit ideal(&kit, true, true);
@ -1663,11 +1663,11 @@ Node* PhaseStringOpts::copy_char(GraphKit& kit, Node* val, Node* dst_array, Node
// Allocate a byte array of specified length.
Node* PhaseStringOpts::allocate_byte_array(GraphKit& kit, IdealKit* ideal, Node* length) {
if (ideal != NULL) {
if (ideal != nullptr) {
// Sync IdealKit and graphKit.
kit.sync_kit(*ideal);
}
Node* byte_array = NULL;
Node* byte_array = nullptr;
{
PreserveReexecuteState preexecs(&kit);
// The original jvms is for an allocation of either a String or
@ -1684,7 +1684,7 @@ Node* PhaseStringOpts::allocate_byte_array(GraphKit& kit, IdealKit* ideal, Node*
AllocateArrayNode* byte_alloc = AllocateArrayNode::Ideal_array_allocation(byte_array, _gvn);
byte_alloc->maybe_set_complete(_gvn);
if (ideal != NULL) {
if (ideal != nullptr) {
// Sync IdealKit and graphKit.
ideal->sync_kit(&kit);
}
@ -1834,8 +1834,8 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
}
case StringConcat::StringMode: {
const Type* type = kit.gvn().type(arg);
Node* count = NULL;
Node* arg_coder = NULL;
Node* count = nullptr;
Node* arg_coder = nullptr;
if (type == TypePtr::NULL_PTR) {
// replace the argument with the null checked version
arg = null_string;
@ -1893,7 +1893,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
coder = __ OrI(coder, arg_coder);
}
length = __ AddI(length, count);
string_sizes->init_req(argi, NULL);
string_sizes->init_req(argi, nullptr);
break;
}
case StringConcat::CharMode: {
@ -1956,7 +1956,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
assert(CompactStrings || (coder->is_Con() && coder->get_int() == java_lang_String::CODER_UTF16),
"Result string must be UTF16 encoded if CompactStrings is disabled");
Node* dst_array = NULL;
Node* dst_array = nullptr;
if (sc->num_arguments() == 1 &&
(sc->mode(0) == StringConcat::StringMode ||
sc->mode(0) == StringConcat::StringNullCheckMode)) {
@ -1965,7 +1965,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
dst_array = kit.load_String_value(sc->argument(0), true);
} else {
// Allocate destination byte array according to coder
dst_array = allocate_byte_array(kit, NULL, __ LShiftI(length, coder));
dst_array = allocate_byte_array(kit, nullptr, __ LShiftI(length, coder));
// Now copy the string representations into the final byte[]
Node* start = __ intcon(0);
@ -2009,7 +2009,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
// The value field is final. Emit a barrier here to ensure that the effect
// of the initialization is committed to memory before any code publishes
// a reference to the newly constructed object (see Parse::do_exits()).
assert(AllocateNode::Ideal_allocation(result, _gvn) != NULL, "should be newly allocated");
assert(AllocateNode::Ideal_allocation(result, _gvn) != nullptr, "should be newly allocated");
kit.insert_mem_bar(Op_MemBarRelease, result);
} else {
result = C->top();